From 4c1d8013f3031fb75c540d6918d080db6090d25c Mon Sep 17 00:00:00 2001
From: Becca Petrin
Date: Mon, 21 May 2018 17:04:26 -0700
Subject: [PATCH 01/39] move fields and field parsing to helper (#4603)
---
builtin/credential/ldap/path_config.go | 246 +-----------------------
helper/ldaputil/config.go | 252 +++++++++++++++++++++++++
2 files changed, 256 insertions(+), 242 deletions(-)
diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go
index 7632d0526..81237930b 100644
--- a/builtin/credential/ldap/path_config.go
+++ b/builtin/credential/ldap/path_config.go
@@ -2,16 +2,9 @@ package ldap
import (
"context"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "strings"
- "text/template"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/ldaputil"
- "github.com/hashicorp/vault/helper/tlsutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -19,105 +12,7 @@ import (
func pathConfig(b *backend) *framework.Path {
return &framework.Path{
Pattern: `config`,
- Fields: map[string]*framework.FieldSchema{
- "url": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "ldap://127.0.0.1",
- Description: "LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
- },
-
- "userdn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
- },
-
- "binddn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP DN for searching for the user DN (optional)",
- },
-
- "bindpass": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP password for searching for the user DN (optional)",
- },
-
- "groupdn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
- },
-
- "groupfilter": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
- Description: `Go template for querying group membership of user (optional)
-The template can access the following context variables: UserDN, Username
-Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))
-Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
- },
-
- "groupattr": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "cn",
- Description: `LDAP attribute to follow on objects returned by
-in order to enumerate user group membership.
-Examples: "cn" or "memberOf", etc.
-Default: cn`,
- },
-
- "upndomain": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
- },
-
- "userattr": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "cn",
- Description: "Attribute used for users (default: cn)",
- },
-
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)",
- },
-
- "discoverdn": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Use anonymous bind to discover the bind DN of a user (optional)",
- },
-
- "insecure_tls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)",
- },
-
- "starttls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Issue a StartTLS command after establishing unencrypted connection (optional)",
- },
-
- "tls_min_version": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
- },
-
- "tls_max_version": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
- },
-
- "deny_null_bind": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true",
- },
-
- "case_sensitive_names": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "If true, case sensitivity will be used when comparing usernames and groups for matching policies.",
- },
- },
+ Fields: ldaputil.ConfigFields(),
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathConfigRead,
@@ -140,7 +35,7 @@ func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldaputil.C
}
// Create a new ConfigEntry, filling in defaults where appropriate
- result, err := b.newConfigEntry(fd)
+ result, err := ldaputil.NewConfigEntry(fd)
if err != nil {
return nil, err
}
@@ -195,147 +90,14 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f
}
resp := &logical.Response{
- Data: map[string]interface{}{
- "url": cfg.Url,
- "userdn": cfg.UserDN,
- "groupdn": cfg.GroupDN,
- "groupfilter": cfg.GroupFilter,
- "groupattr": cfg.GroupAttr,
- "upndomain": cfg.UPNDomain,
- "userattr": cfg.UserAttr,
- "certificate": cfg.Certificate,
- "insecure_tls": cfg.InsecureTLS,
- "starttls": cfg.StartTLS,
- "binddn": cfg.BindDN,
- "deny_null_bind": cfg.DenyNullBind,
- "discoverdn": cfg.DiscoverDN,
- "tls_min_version": cfg.TLSMinVersion,
- "tls_max_version": cfg.TLSMaxVersion,
- "case_sensitive_names": *cfg.CaseSensitiveNames,
- },
+ Data: cfg.PasswordlessMap(),
}
return resp, nil
}
-/*
- * Creates and initializes a ConfigEntry object with its default values,
- * as specified by the passed schema.
- */
-func (b *backend) newConfigEntry(d *framework.FieldData) (*ldaputil.ConfigEntry, error) {
- cfg := new(ldaputil.ConfigEntry)
-
- url := d.Get("url").(string)
- if url != "" {
- cfg.Url = strings.ToLower(url)
- }
- userattr := d.Get("userattr").(string)
- if userattr != "" {
- cfg.UserAttr = strings.ToLower(userattr)
- }
- userdn := d.Get("userdn").(string)
- if userdn != "" {
- cfg.UserDN = userdn
- }
- groupdn := d.Get("groupdn").(string)
- if groupdn != "" {
- cfg.GroupDN = groupdn
- }
- groupfilter := d.Get("groupfilter").(string)
- if groupfilter != "" {
- // Validate the template before proceeding
- _, err := template.New("queryTemplate").Parse(groupfilter)
- if err != nil {
- return nil, errwrap.Wrapf("invalid groupfilter: {{err}}", err)
- }
-
- cfg.GroupFilter = groupfilter
- }
- groupattr := d.Get("groupattr").(string)
- if groupattr != "" {
- cfg.GroupAttr = groupattr
- }
- upndomain := d.Get("upndomain").(string)
- if upndomain != "" {
- cfg.UPNDomain = upndomain
- }
- certificate := d.Get("certificate").(string)
- if certificate != "" {
- block, _ := pem.Decode([]byte(certificate))
-
- if block == nil || block.Type != "CERTIFICATE" {
- return nil, fmt.Errorf("failed to decode PEM block in the certificate")
- }
- _, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, errwrap.Wrapf("failed to parse certificate: {{err}}", err)
- }
- cfg.Certificate = certificate
- }
- insecureTLS := d.Get("insecure_tls").(bool)
- if insecureTLS {
- cfg.InsecureTLS = insecureTLS
- }
- cfg.TLSMinVersion = d.Get("tls_min_version").(string)
- if cfg.TLSMinVersion == "" {
- return nil, fmt.Errorf("failed to get 'tls_min_version' value")
- }
-
- var ok bool
- _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version'")
- }
-
- cfg.TLSMaxVersion = d.Get("tls_max_version").(string)
- if cfg.TLSMaxVersion == "" {
- return nil, fmt.Errorf("failed to get 'tls_max_version' value")
- }
-
- _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_max_version'")
- }
- if cfg.TLSMaxVersion < cfg.TLSMinVersion {
- return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'")
- }
-
- startTLS := d.Get("starttls").(bool)
- if startTLS {
- cfg.StartTLS = startTLS
- }
-
- bindDN := d.Get("binddn").(string)
- if bindDN != "" {
- cfg.BindDN = bindDN
- }
-
- bindPass := d.Get("bindpass").(string)
- if bindPass != "" {
- cfg.BindPassword = bindPass
- }
-
- denyNullBind := d.Get("deny_null_bind").(bool)
- if denyNullBind {
- cfg.DenyNullBind = denyNullBind
- }
-
- discoverDN := d.Get("discoverdn").(bool)
- if discoverDN {
- cfg.DiscoverDN = discoverDN
- }
-
- caseSensitiveNames, ok := d.GetOk("case_sensitive_names")
- if ok {
- cfg.CaseSensitiveNames = new(bool)
- *cfg.CaseSensitiveNames = caseSensitiveNames.(bool)
- }
-
- return cfg, nil
-}
-
func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
// Build a ConfigEntry struct out of the supplied FieldData
- cfg, err := b.newConfigEntry(d)
+ cfg, err := ldaputil.NewConfigEntry(d)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
diff --git a/helper/ldaputil/config.go b/helper/ldaputil/config.go
index f62da7575..349708db0 100644
--- a/helper/ldaputil/config.go
+++ b/helper/ldaputil/config.go
@@ -5,10 +5,235 @@ import (
"encoding/pem"
"errors"
"fmt"
+ "strings"
+ "text/template"
"github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/logical/framework"
+
+ "github.com/hashicorp/errwrap"
)
+// ConfigFields returns all the config fields that can potentially be used by the LDAP client.
+// Not all fields will be used by every integration.
+func ConfigFields() map[string]*framework.FieldSchema {
+ return map[string]*framework.FieldSchema{
+ "url": {
+ Type: framework.TypeString,
+ Default: "ldap://127.0.0.1",
+ Description: "LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
+ },
+
+ "userdn": {
+ Type: framework.TypeString,
+ Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
+ },
+
+ "binddn": {
+ Type: framework.TypeString,
+ Description: "LDAP DN for searching for the user DN (optional)",
+ },
+
+ "bindpass": {
+ Type: framework.TypeString,
+ Description: "LDAP password for searching for the user DN (optional)",
+ },
+
+ "groupdn": {
+ Type: framework.TypeString,
+ Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
+ },
+
+ "groupfilter": {
+ Type: framework.TypeString,
+ Default: "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
+ Description: `Go template for querying group membership of user (optional)
+The template can access the following context variables: UserDN, Username
+Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))
+Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
+ },
+
+ "groupattr": {
+ Type: framework.TypeString,
+ Default: "cn",
+ Description: `LDAP attribute to follow on objects returned by the
+group filter in order to enumerate user group membership.
+Examples: "cn" or "memberOf", etc.
+Default: cn`,
+ },
+
+ "upndomain": {
+ Type: framework.TypeString,
+ Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
+ },
+
+ "userattr": {
+ Type: framework.TypeString,
+ Default: "cn",
+ Description: "Attribute used for users (default: cn)",
+ },
+
+ "certificate": {
+ Type: framework.TypeString,
+ Description: "CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)",
+ },
+
+ "discoverdn": {
+ Type: framework.TypeBool,
+ Description: "Use anonymous bind to discover the bind DN of a user (optional)",
+ },
+
+ "insecure_tls": {
+ Type: framework.TypeBool,
+ Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)",
+ },
+
+ "starttls": {
+ Type: framework.TypeBool,
+ Description: "Issue a StartTLS command after establishing unencrypted connection (optional)",
+ },
+
+ "tls_min_version": {
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ },
+
+ "tls_max_version": {
+ Type: framework.TypeString,
+ Default: "tls12",
+ Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
+ },
+
+ "deny_null_bind": {
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true",
+ },
+
+ "case_sensitive_names": {
+ Type: framework.TypeBool,
+ Description: "If true, case sensitivity will be used when comparing usernames and groups for matching policies.",
+ },
+ }
+}
+
+// NewConfigEntry creates and initializes a ConfigEntry object with its
+// default values, as specified by the passed schema.
+func NewConfigEntry(d *framework.FieldData) (*ConfigEntry, error) {
+ cfg := new(ConfigEntry)
+
+ url := d.Get("url").(string)
+ if url != "" {
+ cfg.Url = strings.ToLower(url)
+ }
+ userattr := d.Get("userattr").(string)
+ if userattr != "" {
+ cfg.UserAttr = strings.ToLower(userattr)
+ }
+ userdn := d.Get("userdn").(string)
+ if userdn != "" {
+ cfg.UserDN = userdn
+ }
+ groupdn := d.Get("groupdn").(string)
+ if groupdn != "" {
+ cfg.GroupDN = groupdn
+ }
+ groupfilter := d.Get("groupfilter").(string)
+ if groupfilter != "" {
+ // Validate the template before proceeding
+ _, err := template.New("queryTemplate").Parse(groupfilter)
+ if err != nil {
+ return nil, errwrap.Wrapf("invalid groupfilter: {{err}}", err)
+ }
+
+ cfg.GroupFilter = groupfilter
+ }
+ groupattr := d.Get("groupattr").(string)
+ if groupattr != "" {
+ cfg.GroupAttr = groupattr
+ }
+ upndomain := d.Get("upndomain").(string)
+ if upndomain != "" {
+ cfg.UPNDomain = upndomain
+ }
+ certificate := d.Get("certificate").(string)
+ if certificate != "" {
+ block, _ := pem.Decode([]byte(certificate))
+
+ if block == nil || block.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("failed to decode PEM block in the certificate")
+ }
+ _, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed to parse certificate: {{err}}", err)
+ }
+ cfg.Certificate = certificate
+ }
+ insecureTLS := d.Get("insecure_tls").(bool)
+ if insecureTLS {
+ cfg.InsecureTLS = insecureTLS
+ }
+ cfg.TLSMinVersion = d.Get("tls_min_version").(string)
+ if cfg.TLSMinVersion == "" {
+ return nil, fmt.Errorf("failed to get 'tls_min_version' value")
+ }
+
+ var ok bool
+ _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version'")
+ }
+
+ cfg.TLSMaxVersion = d.Get("tls_max_version").(string)
+ if cfg.TLSMaxVersion == "" {
+ return nil, fmt.Errorf("failed to get 'tls_max_version' value")
+ }
+
+ _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_max_version'")
+ }
+ if cfg.TLSMaxVersion < cfg.TLSMinVersion {
+ return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'")
+ }
+
+ startTLS := d.Get("starttls").(bool)
+ if startTLS {
+ cfg.StartTLS = startTLS
+ }
+
+ bindDN := d.Get("binddn").(string)
+ if bindDN != "" {
+ cfg.BindDN = bindDN
+ }
+
+ bindPass := d.Get("bindpass").(string)
+ if bindPass != "" {
+ cfg.BindPassword = bindPass
+ }
+
+ denyNullBind := d.Get("deny_null_bind").(bool)
+ if denyNullBind {
+ cfg.DenyNullBind = denyNullBind
+ }
+
+ discoverDN := d.Get("discoverdn").(bool)
+ if discoverDN {
+ cfg.DiscoverDN = discoverDN
+ }
+
+ caseSensitiveNames, ok := d.GetOk("case_sensitive_names")
+ if ok {
+ cfg.CaseSensitiveNames = new(bool)
+ *cfg.CaseSensitiveNames = caseSensitiveNames.(bool)
+ }
+
+ return cfg, nil
+}
+
type ConfigEntry struct {
Url string `json:"url"`
UserDN string `json:"userdn"`
@@ -34,6 +259,33 @@ type ConfigEntry struct {
CaseSensitiveNames *bool `json:"CaseSensitiveNames,omitempty"`
}
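+// Map returns the config as a map, including the sensitive bindpass value.
+// Use PasswordlessMap when the data will be returned in an API response.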
+func (c *ConfigEntry) Map() map[string]interface{} {
+ m := c.PasswordlessMap()
+ m["bindpass"] = c.BindPassword
+ return m
+}
+
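+// PasswordlessMap returns the config as a map, omitting the sensitive
+// bindpass value so the result is safe to return from read endpoints.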
+func (c *ConfigEntry) PasswordlessMap() map[string]interface{} {
+ return map[string]interface{}{
+ "url": c.Url,
+ "userdn": c.UserDN,
+ "groupdn": c.GroupDN,
+ "groupfilter": c.GroupFilter,
+ "groupattr": c.GroupAttr,
+ "upndomain": c.UPNDomain,
+ "userattr": c.UserAttr,
+ "certificate": c.Certificate,
+ "insecure_tls": c.InsecureTLS,
+ "starttls": c.StartTLS,
+ "binddn": c.BindDN,
+ "deny_null_bind": c.DenyNullBind,
+ "discoverdn": c.DiscoverDN,
+ "tls_min_version": c.TLSMinVersion,
+ "tls_max_version": c.TLSMaxVersion,
+ "case_sensitive_names": *c.CaseSensitiveNames,
+ }
+}
+
func (c *ConfigEntry) Validate() error {
if len(c.Url) == 0 {
return errors.New("at least one url must be provided")
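For context, here is a rough sketch of how the relocated schema and parser are exercised end to end, assuming the LDAP auth method is mounted at `auth/ldap/` (the paths and values below are illustrative only):

```shell
# Writing the config exercises the same ldaputil.NewConfigEntry parsing
# and validation that previously lived in the backend:
$ vault write auth/ldap/config \
    url="ldap://ldap.example.com" \
    userdn="ou=People,dc=example,dc=org" \
    groupdn="ou=Groups,dc=example,dc=org" \
    binddn="cn=vault,ou=Apps,dc=example,dc=org" \
    bindpass="example-password" \
    tls_min_version="tls12"

# Reading the config back is served from ConfigEntry.PasswordlessMap, so
# the bindpass value is omitted from the response:
$ vault read auth/ldap/config
```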
From 013e4e4d819b5b43bd1dcd9f7d5e8826a968c604 Mon Sep 17 00:00:00 2001
From: Dan Brown
Date: Tue, 22 May 2018 05:30:13 -0700
Subject: [PATCH 02/39] Fix typo (#4607)
---
command/base.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/command/base.go b/command/base.go
index f86f8b622..79f8cf40a 100644
--- a/command/base.go
+++ b/command/base.go
@@ -284,7 +284,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
Usage: "Print only the field with the given name. Specifying " +
"this option will take precedence over other formatting " +
"directives. The result will not have a trailing newline " +
- "making it idea for piping to other processes.",
+ "making it ideal for piping to other processes.",
})
}
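For illustration, a minimal sketch of the behavior this flag description documents (the secret path is hypothetical):

```shell
# -field prints only the named field, with no trailing newline, so the
# output pipes cleanly into another process:
$ vault read -field=value secret/hello | wc -c
```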
From 3db5a6adaa64ff1fd24fc111f22a28ba79940deb Mon Sep 17 00:00:00 2001
From: Chris Hoffman
Date: Tue, 22 May 2018 10:00:20 -0400
Subject: [PATCH 03/39] updating link
---
website/source/docs/concepts/ha.html.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/concepts/ha.html.md b/website/source/docs/concepts/ha.html.md
index fdac2c164..8ae84ba0d 100644
--- a/website/source/docs/concepts/ha.html.md
+++ b/website/source/docs/concepts/ha.html.md
@@ -16,7 +16,7 @@ You can tell if a data store supports high availability mode ("HA") by starting
the server and seeing if "(HA available)" is output next to the data store
information. If it is, then Vault will automatically use HA mode. This
information is also available on the
-[Configuration](https://www.vaultproject.io/docs/configuration/index.html) page.
+[Configuration](/docs/configuration/index.html) page.
To be highly available, one of the Vault server nodes grabs a lock within the
data store. The successful server node then becomes the active node; all other
From bc50ec113ad1c1edf23b8d7d5bb295851d2f225b Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Tue, 22 May 2018 10:39:24 -0400
Subject: [PATCH 04/39] changelog++
---
CHANGELOG.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index acf762bf7..99a9f8dd5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,9 @@ FEATURES:
* Cert auth CIDR restrictions: When using the `cert` auth method you can now
limit authentication to specific CIDRs; these will also be encoded in
resultant tokens to limit their use.
+ * Userpass auth CIDR restrictions: When using the `userpass` auth method you
+ can now limit authentication to specific CIDRs; these will also be encoded
+ in resultant tokens to limit their use.
IMPROVEMENTS:
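For illustration, a hypothetical sketch of the new userpass restriction (the `bound_cidrs` parameter name is assumed here, mirroring the cert auth method):

```shell
# Logins for this user are limited to the given source CIDRs; the
# resultant token carries the same restriction:
$ vault write auth/userpass/users/alice \
    password="training" \
    bound_cidrs="10.0.0.0/16,127.0.0.1/32"
```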
From d88e4d5019f87b8ee60ec9430b425fbbe9f3b68f Mon Sep 17 00:00:00 2001
From: Yoko
Date: Tue, 22 May 2018 08:57:36 -0700
Subject: [PATCH 05/39] Mount Filters guide (#4536)
* WIP: Mount filter guide
* WIP
* Mount filter guide for CLI, API, and UI
* updated the next step
* Updated the verification steps
* Added a note about the unseal key on secondaries
* Added more details
* Added a reference to mount filter guide
* Added a note about generating a new root token
* Added a note about local secret engine
---
.../assets/images/vault-mount-filter-0.png | 3 +
.../assets/images/vault-mount-filter-10.png | 3 +
.../assets/images/vault-mount-filter-11.png | 3 +
.../assets/images/vault-mount-filter-12.png | 3 +
.../assets/images/vault-mount-filter-13.png | 3 +
.../assets/images/vault-mount-filter-2.png | 3 +
.../assets/images/vault-mount-filter-3.png | 3 +
.../assets/images/vault-mount-filter-4.png | 3 +
.../assets/images/vault-mount-filter-5.png | 3 +
.../assets/images/vault-mount-filter-6.png | 3 +
.../assets/images/vault-mount-filter-7.png | 3 +
.../assets/images/vault-mount-filter-8.png | 3 +
.../assets/images/vault-mount-filter-9.png | 3 +
.../assets/images/vault-mount-filter.png | 3 +
.../source/guides/operations/index.html.md | 5 +
.../guides/operations/mount-filter.html.md | 521 ++++++++++++++++++
.../operations/reference-architecture.html.md | 3 +
website/source/layouts/guides.erb | 3 +
18 files changed, 574 insertions(+)
create mode 100644 website/source/assets/images/vault-mount-filter-0.png
create mode 100644 website/source/assets/images/vault-mount-filter-10.png
create mode 100644 website/source/assets/images/vault-mount-filter-11.png
create mode 100644 website/source/assets/images/vault-mount-filter-12.png
create mode 100644 website/source/assets/images/vault-mount-filter-13.png
create mode 100644 website/source/assets/images/vault-mount-filter-2.png
create mode 100644 website/source/assets/images/vault-mount-filter-3.png
create mode 100644 website/source/assets/images/vault-mount-filter-4.png
create mode 100644 website/source/assets/images/vault-mount-filter-5.png
create mode 100644 website/source/assets/images/vault-mount-filter-6.png
create mode 100644 website/source/assets/images/vault-mount-filter-7.png
create mode 100644 website/source/assets/images/vault-mount-filter-8.png
create mode 100644 website/source/assets/images/vault-mount-filter-9.png
create mode 100644 website/source/assets/images/vault-mount-filter.png
create mode 100644 website/source/guides/operations/mount-filter.html.md
diff --git a/website/source/assets/images/vault-mount-filter-0.png b/website/source/assets/images/vault-mount-filter-0.png
new file mode 100644
index 000000000..b203e2aff
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-0.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54d5fab44be2848d598a948ddc21517a39742a801693a5f0b35aa362cf573ed4
+size 47273
diff --git a/website/source/assets/images/vault-mount-filter-10.png b/website/source/assets/images/vault-mount-filter-10.png
new file mode 100644
index 000000000..e26b8fc0a
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-10.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8df2040c86de8ef399a4aeac00503921d877a5d6d77948ba30d4fb5f359fecd6
+size 117802
diff --git a/website/source/assets/images/vault-mount-filter-11.png b/website/source/assets/images/vault-mount-filter-11.png
new file mode 100644
index 000000000..60d2d6e4b
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-11.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8a2a4dc7ece36ee9ffaefc4e358d3b3053abb6e2d47f627db4daa68072a34d7
+size 55423
diff --git a/website/source/assets/images/vault-mount-filter-12.png b/website/source/assets/images/vault-mount-filter-12.png
new file mode 100644
index 000000000..ddeef422a
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-12.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9990a0824572dfd4c66444415fb96acd39db2a112c117aa4314fa4ed2ccfe9b9
+size 71591
diff --git a/website/source/assets/images/vault-mount-filter-13.png b/website/source/assets/images/vault-mount-filter-13.png
new file mode 100644
index 000000000..190cd9e11
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-13.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea9bf3fcd2abaaf6e07366b4c831bd37ce2c0750b3cc80a3b8d6359570cc987c
+size 42454
diff --git a/website/source/assets/images/vault-mount-filter-2.png b/website/source/assets/images/vault-mount-filter-2.png
new file mode 100644
index 000000000..110db14eb
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd485238f1a786bfafc1c298760d25f16409244e0e0985437240280416f4e9dc
+size 98414
diff --git a/website/source/assets/images/vault-mount-filter-3.png b/website/source/assets/images/vault-mount-filter-3.png
new file mode 100644
index 000000000..09ce0f795
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cc367adc5b24e03d4efb34896d23bfb6bfee709936f3672f06a6adf615e61c3
+size 89101
diff --git a/website/source/assets/images/vault-mount-filter-4.png b/website/source/assets/images/vault-mount-filter-4.png
new file mode 100644
index 000000000..f9904e97b
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-4.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92bcfe09f6ac7c0d6297f847534e1a2e6447e6970289229d3c7d4b4a9227e0f1
+size 93624
diff --git a/website/source/assets/images/vault-mount-filter-5.png b/website/source/assets/images/vault-mount-filter-5.png
new file mode 100644
index 000000000..fdc22779a
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-5.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868c93660738b2e87de8a15ff0c1d5834dfc58326741a5cd15093dcb42f9cf47
+size 124192
diff --git a/website/source/assets/images/vault-mount-filter-6.png b/website/source/assets/images/vault-mount-filter-6.png
new file mode 100644
index 000000000..da53dd1e0
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-6.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f61d65133a08a98c5eafd1d62235b567e34dbab52b10b5c5c93272433d4ece6
+size 55853
diff --git a/website/source/assets/images/vault-mount-filter-7.png b/website/source/assets/images/vault-mount-filter-7.png
new file mode 100644
index 000000000..c0d4ebf13
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-7.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efd0c6839b9df30061c3d5b9492392fa3801cf9699fb7f719656ec3f52a15af2
+size 133854
diff --git a/website/source/assets/images/vault-mount-filter-8.png b/website/source/assets/images/vault-mount-filter-8.png
new file mode 100644
index 000000000..cad9aeba7
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-8.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:664421d0b1b7db4fdb6ba1bb475ac0083a19f15c2fd0b9663fa93284846ba130
+size 152179
diff --git a/website/source/assets/images/vault-mount-filter-9.png b/website/source/assets/images/vault-mount-filter-9.png
new file mode 100644
index 000000000..7d4393b24
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter-9.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11adc5f12189cc94b37c31e3d00027769746324086dcc737e66d3227020e0789
+size 218932
diff --git a/website/source/assets/images/vault-mount-filter.png b/website/source/assets/images/vault-mount-filter.png
new file mode 100644
index 000000000..14c37682e
--- /dev/null
+++ b/website/source/assets/images/vault-mount-filter.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a022b3499b5ec92bd1d807ab324c4e90dd862811aeaa641c6b0ae4ebedd61ae4
+size 104605
diff --git a/website/source/guides/operations/index.html.md b/website/source/guides/operations/index.html.md
index a7e2d41e6..bcef88fbc 100644
--- a/website/source/guides/operations/index.html.md
+++ b/website/source/guides/operations/index.html.md
@@ -33,6 +33,11 @@ walks you through the commands to activate the Vault servers in replication mode
Please note that [Vault Replication](/docs/vault-enterprise/replication/index.html)
is a Vault Enterprise feature.
+- **[Enterprise Only]** [Mount Filter](/guides/operations/mount-filter.html)
+guide demonstrates how to selectively filter out secret engines from being
+replicated across clusters. This feature can help organizations comply with
+***General Data Protection Regulation (GDPR)***.
+
- **[Enterprise Only]** [Vault Auto-unseal using AWS Key Management Service (KMS)](/guides/operations/autounseal-aws-kms.html) guide demonstrates an example of
how to use Terraform to provision an instance that utilizes an encryption key
from AWS Key Management Service (KMS).
diff --git a/website/source/guides/operations/mount-filter.html.md b/website/source/guides/operations/mount-filter.html.md
new file mode 100644
index 000000000..72efe50b4
--- /dev/null
+++ b/website/source/guides/operations/mount-filter.html.md
@@ -0,0 +1,521 @@
+---
+layout: "guides"
+page_title: "Vault Mount Filter - Guides"
+sidebar_current: "guides-operations-mount-filter"
+description: |-
+ This guide demonstrates how to selectively filter out secret mounts for
+ Performance Replication.
+---
+
+# Vault Mount Filter
+
+~> **Enterprise Only:** The mount filter feature is part of _Vault Enterprise Premium_.
+
+Mount filters are a new way of controlling which secrets are moved across
+clusters and physical regions as a result of replication. With mount filters,
+users can select which secret engines will be replicated as part of a
+performance replication relationship.
+
+By default, all non-local secret engines and associated data are replicated as
+part of replication. The mount filter feature allows users to whitelist or
+blacklist which secret engines are replicated, giving them further control
+over the movement of secrets across their infrastructure.
+
+![Performance Replication](/assets/images/vault-mount-filter-2.png)
+
+
+## Reference Materials
+
+- Preparing for GDPR Compliance with HashiCorp Vault [webinar](https://www.hashicorp.com/resources/preparing-for-gdpr-compliance-with-hashicorp-vault)
+- Preparing for GDPR Compliance with HashiCorp Vault [blog post](https://www.hashicorp.com/blog/preparing-for-gdpr-compliance-with-hashicorp-vault)
+- [Create Mounts Filter (API)](/api/system/replication-performance.html#create-mounts-filter)
+- [Performance Replication and Disaster Recovery (DR) Replication](/docs/enterprise/replication/index.html#performance-replication-and-disaster-recovery-dr-replication)
+
+## Estimated Time to Complete
+
+10 minutes
+
+## Challenge
+
+[**General Data Protection Regulation (GDPR)**](https://www.eugdpr.org/) is designed
+to strengthen data protection and privacy for all individuals within the
+European Union. It requires that personally identifiable data not be physically
+transferred to locations outside the European Union unless the region or country
+has an equal rigor of data protection regulation as the EU.
+
+Failure to abide by GDPR can result in fines as high as 20 million EUR or 4% of
+global annual revenue (whichever is greater).
+
+
+## Solution
+
+Leverage Vault's **mount filter** feature to abide by data movement and
+sovereignty regulations while ensuring performant access across geographically
+distributed regions.
+
+The [***Preparing for GDPR Compliance with HashiCorp
+Vault***](https://www.hashicorp.com/resources/preparing-for-gdpr-compliance-with-hashicorp-vault)
+webinar discusses GDPR compliance in further detail.
+
+[![YouTube](/assets/images/vault-mount-filter.png)](https://youtu.be/hmf6sN4W8pE)
+
+## Prerequisites
+
+This intermediate Vault operations guide assumes that you have some working
+knowledge of Vault.
+
+You need two Vault Enterprise clusters: one representing the EU cluster, and
+another representing the US cluster, both backed by Consul for storage.
+
+
+## Steps
+
+**Scenario:** You have a Vault cluster in the EU and wish to span across the
+United States by setting up a secondary cluster and enabling performance
+replication. However, some data must remain in the EU and should ***not*** be
+replicated to the US cluster.
+
+![Guide Scenario](/assets/images/vault-mount-filter-0.png)
+
+Leverage the mount filter feature to blacklist the secrets that are subject to
+GDPR from being replicated across regions.
+
+1. [Segment GDPR and non-GDPR secret engines](#step1)
+1. [Enable performance replication with mount filter](#step2)
+1. [Verify the replication mount filter](#step3)
+1. [Enable a local secret engine](#step4)
+
+~> **NOTE:** Ensure that GDPR data is segmented by secret mount and blacklist
+the movement of those secret mounts to non-GDPR territories.
+
+
+### Step 1: Segment GDPR and non-GDPR secret engines
+
+In the EU cluster (primary cluster), enable key/value secret engines:
+
+- At **`EU_GDPR_data`** for GDPR data
+- At **`US_NON_GDPR_data`** for non-GDPR data localized for US
+
+#### CLI command
+
+```shell
+# For GDPR data
+$ vault secrets enable -path=EU_GDPR_data kv-v2
+
+# For non-GDPR data accessible in US
+$ vault secrets enable -path=US_NON_GDPR_data kv-v2
+```
+
+#### API call using cURL
+
+```shell
+# For GDPR data
+$ curl --header "X-Vault-Token: ..." \
+ --request POST \
+ --data '{"type":"kv-v2"}' \
+ https://eu-west-1.compute.com:8200/v1/sys/mounts/EU_GDPR_data
+
+# For non-GDPR data accessible in US
+$ curl --header "X-Vault-Token: ..." \
+ --request POST \
+ --data '{"type":"kv-v2"}' \
+ https://eu-west-1.compute.com:8200/v1/sys/mounts/US_NON_GDPR_data
+```
+
+#### Web UI
+
+Open a web browser and launch the Vault UI (e.g.
+https://eu-west-1.compute.com:8200/ui), and then log in.
+
+Select **Enable new engine** and enter corresponding parameter values:
+
+![GDPR KV](/assets/images/vault-mount-filter-3.png)
+
+![Non-GDPR KV](/assets/images/vault-mount-filter-4.png)
+
+
+Click **Enable Engine** to complete.
+
+
+### Step 2: Enable performance replication with mount filter
+
+#### CLI command
+
+1. Enable performance replication on the **primary** cluster.
+
+ ```plaintext
+ $ vault write -f sys/replication/performance/primary/enable
+ WARNING! The following warnings were returned from Vault:
+
+ * This cluster is being enabled as a primary for replication. Vault will be
+ unavailable for a brief period and will resume service shortly.
+ ```
+
+1. Generate a secondary token.
+
+ ```plaintext
+ $ vault write sys/replication/performance/primary/secondary-token id="secondary"
+ Key Value
+ --- -----
+ wrapping_token: eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJhZGRyIjoiaHR0cDovLzE3Mi4zMS4yMC4xODA6ODIwMyIsImV4cCI6MTUyNTg0ODAxMywiaWF0IjoxNTI1ODQ2MjEzLCJqdGkiOiJlNTFiMjUxZi01ZTg2LTg4OWEtNGZmMy03NTQzMjRkNTdlMGQiLCJ0eXBlIjoid3JhcHBpbmcifQ.MIGGAkE2dDj3nmaoLHg7oldQ1iZPD0U8doyj3x3mQUVfTl8W99QYG8GM6VGVzhRPGvKctGriuo2oXN_8euWQb01M1y6n7gJBSu-qdXw-v2RieOyopAHls1bWhw4sO9Nlds8IDFA15vqkLXnq2g4_5lvlhxpP7B8dEOHvWXkHG4kJ_mKvrgR0dU0
+ wrapping_accessor: 6ded4fb0-5e8c-2a37-1b3e-823673220348
+ wrapping_token_ttl: 30m
+ wrapping_token_creation_time: 2018-05-09 06:10:13.437421436 +0000 UTC
+ wrapping_token_creation_path: sys/replication/performance/primary/secondary-token
+ ```
+
+1. Create a **mount filter** to blacklist `EU_GDPR_data`.
+
+ ```plaintext
+ $ vault write sys/replication/performance/primary/mount-filter/secondary \
+ mode="blacklist" paths="EU_GDPR_data/"
+ ```
+
+1. Enable performance replication on the **secondary** cluster.
+
+ ```plaintext
+ $ vault write sys/replication/performance/secondary/enable token="..."
+ ```
+ Where the `token` is the `wrapping_token` obtained from the primary cluster.
+
+ !> **NOTE:** This will immediately clear all data in the secondary cluster.
+
+#### API call using cURL
+
+1. Enable performance replication on the **primary** cluster.
+
+ ```plaintext
+ $ curl --header "X-Vault-Token: ..." \
+ --request POST \
+ --data '{"primary_cluster_addr":"https://eu-west-1.compute.com:8200"}' \
+ https://eu-west-1.compute.com:8200/v1/sys/replication/performance/primary/enable
+ ```
+
+1. Generate a secondary token.
+
+ ```plaintext
+ $ curl --header "X-Vault-Token: ..." \
+ --request POST \
+ --data '{ "id": "secondary"}' \
+ https://eu-west-1.compute.com:8200/v1/sys/replication/performance/primary/secondary-token | jq
+ {
+ "request_id": "",
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": null,
+ "wrap_info": {
+ "token": "eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJhZGRyIjoiaHR0cDovLzEyNy4wLjAuMTo4MjAzIiwiZXhwIjoxNTI1ODI5Njc2LCJpYXQiOjE1MjU4Mjc4NzYsImp0aSI6IjAwNmVkMDdjLWQ0MzYtZWViYy01OWYwLTdiMTU0ZGFmMDNiMCIsInR5cGUiOiJ3cmFwcGluZyJ9.MIGHAkF6saWWL-oRQMJIoUnaUOHNkcoHZCBwQs6mSMjPBopMi8DkGCJGBrh4jgV2mSzwFY1r5Ne7O66HmuMpm40MsYqjAQJCANSco_Sx5q6FmQSfoY-HtsVO1_YKWF4O6B7gYCvPKYkODMIwe5orCSgmIDyXHZt-REPm0sfdk4ZNyRCIRK5hDWyQ",
+ "accessor": "6ea2a4e2-2926-120f-f288-c2348c78fb3e",
+ "ttl": 1800,
+ "creation_time": "2018-05-09T01:04:36.514715311Z",
+ "creation_path": "sys/replication/performance/primary/secondary-token"
+ },
+ "warnings": null,
+ "auth": null
+ }
+ ```
+
+1. Create a **mount filter** to blacklist `EU_GDPR_data`.
+
+ ```plaintext
+   $ tee payload.json <<EOF
+   {
+     "mode": "blacklist",
+     "paths": [ "EU_GDPR_data/" ]
+   }
+   EOF
+
+   $ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       https://eu-west-1.compute.com:8200/v1/sys/replication/performance/primary/mount-filter/secondary
+   ```
+
+1. Enable performance replication on the **secondary** cluster, where the
+`token` is the `wrapping_token` obtained from the primary cluster.
+
+   ```plaintext
+   $ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data '{"token":"..."}' \
+       https://us-central.compute.com:8201/v1/sys/replication/performance/secondary/enable
+   ```
+
+   !> **NOTE:** This will immediately clear all data in the secondary cluster.
+
+
+#### Web UI
+
+1. Select **Replication** and check the **Performance** radio button.
+ ![Performance Replication - primary](/assets/images/vault-mount-filter-5.png)
+
+1. Click **Enable replication**.
+
+1. Select the **Secondaries** tab, and then click **Add**.
+ ![Performance Replication - primary](/assets/images/vault-mount-filter-6.png)
+
+1. Populate the **Secondary ID** field, and then select **Configure performance
+mount filtering** to set your mount filter options. You can filter by
+whitelisting or blacklisting. For this example, select **Blacklist**.
+
+1. Check **EU_GDPR_data** to prevent it from being replicated to the secondary
+cluster.
+ ![Performance Replication - primary](/assets/images/vault-mount-filter-7.png)
+
+1. Click **Generate token**.
+ ![Performance Replication - primary](/assets/images/vault-mount-filter-8.png)
+
+1. Click **Copy** to copy the token.
+
+1. Now, launch the Vault UI for the secondary cluster (e.g. https://us-central.compute.com:8201/ui), and then click **Replication**.
+
+1. Check the **Performance** radio button, and then select **secondary** under the **Cluster mode**. Paste the token you copied from the primary.
+ ![Performance Replication - secondary](/assets/images/vault-mount-filter-9.png)
+
+1. Click **Enable replication**.
+
+
+
+~> **NOTE:** At this point, the secondary cluster must be unsealed using the
+**primary cluster's unseal key**. If the secondary is in an HA cluster, ensure
+that each standby is sealed and unsealed with the primary’s unseal keys. The
+secondary cluster mirrors the configuration of its primary cluster's backends
+such as auth methods, secret engines, audit devices, etc. It uses the primary as
+the _source of truth_ and passes token requests along to the primary.
+
+
+Restart the secondary Vault server (e.g. `https://us-central.compute.com:8201`)
+and unseal it with the primary cluster's unseal key.
+
+```plaintext
+$ vault operator unseal
+Unseal Key (will be hidden):
+```
+
+The initial root token on the secondary no longer works. Use the auth methods
+configured on the primary cluster to log into the secondary.
+
+**Example:**
+
+Enable and configure the userpass auth method on the **primary** cluster and
+create a new username and password.
+
+```shell
+# Enable the userpass auth method on the primary
+$ vault auth enable userpass
+
+# Create a user with admin policy
+$ vault write auth/userpass/users/james password="passw0rd" policies="admin"
+```
+
+-> Alternatively, you can [generate a new root token](/guides/operations/generate-root.html)
+using the primary cluster's unseal key. However, it is recommended that root
+tokens are only used for just enough initial setup or in emergencies.
+
+
+Log into the **secondary** cluster using the enabled auth method.
+
+```plaintext
+$ vault login -method=userpass username=james password="passw0rd"
+```
+
+
+### Step 3: Verify the replication mount filter
+
+Once the replication completes, verify that the secrets stored in
+`EU_GDPR_data` never get replicated to the US cluster.
+
+#### CLI command
+
+On the **EU** cluster, write some secrets:
+
+```shell
+# Write some secret at EU_GDPR_data/secret
+$ vault kv put EU_GDPR_data/secret pswd="password"
+Key Value
+--- -----
+created_time 2018-05-10T18:00:38.912587665Z
+deletion_time n/a
+destroyed false
+version 1
+
+# Write some secret at US_NON_GDPR_data/secret
+$ vault kv put US_NON_GDPR_data/secret apikey="my-api-key"
+Key Value
+--- -----
+created_time 2018-05-10T18:04:37.554665851Z
+deletion_time n/a
+destroyed false
+version 1
+```
+
+From the **US** cluster, read the secrets:
+
+```shell
+# Read the secrets at EU_GDPR_data/secret
+$ vault kv get EU_GDPR_data/secret
+No value found at EU_GDPR_data/secret
+
+# Read the secrets at US_NON_GDPR_data/secret
+$ vault kv get US_NON_GDPR_data/secret
+====== Metadata ======
+Key Value
+--- -----
+created_time 2018-05-10T18:09:07.717250408Z
+deletion_time n/a
+destroyed false
+version 1
+
+===== Data =====
+Key Value
+--- -----
+apikey my-api-key
+```
+
+
+#### API call using cURL
+
+On the **EU** cluster, write some secret:
+
+```shell
+# Create the request payload
+$ tee payload.json <<EOF
+{
+  "data": { "pswd": "password" }
+}
+EOF
+
+# Write the secret at EU_GDPR_data/secret
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       https://eu-west-1.compute.com:8200/v1/EU_GDPR_data/data/secret
+```
+
+From the **US** cluster, try to read the same path; no data is returned
+because `EU_GDPR_data` is not replicated:
+
+```plaintext
+$ curl --header "X-Vault-Token: ..." \
+       https://us-central.compute.com:8201/v1/EU_GDPR_data/data/secret
+```
+
+#### Web UI
+
+On the **EU** cluster, select **EU_GDPR_data** > **Create secret**:
+
+![Secrets](/assets/images/vault-mount-filter-12.png)
+
+Enter the values and click **Save**. Repeat the step to write some secrets at
+the **US_NON_GDPR_data** path as well.
+
+
+On the **US** cluster, select **US_NON_GDPR_data**. You should be able to see
+the `apikey` under `US_NON_GDPR_data/secret`.
+
+![Secrets](/assets/images/vault-mount-filter-13.png)
+
+The **EU_GDPR_data** data is not replicated, so you won't be able to see the
+secrets.
+
+
+### Step 4: Enable a local secret engine
+
+When replication is enabled, you can mark a secrets engine as local only. Local
+secret engines are not replicated or removed by replication.
+
+Log in to the **secondary** cluster and enable a key/value secret engine at
+`US_ONLY_data` to store secrets only valid for the US region.
+
+#### CLI command
+
+Pass the `-local` flag:
+
+```plaintext
+$ vault secrets enable -local -path=US_ONLY_data kv-v2
+```
+
+#### API call using cURL
+
+Pass the `local` parameter in the API request:
+
+```plaintext
+$ tee payload.json <<EOF
+{
+  "type": "kv-v2",
+  "local": true
+}
+EOF
+
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       https://us-central.compute.com:8201/v1/sys/mounts/US_ONLY_data
+```
+
+-> **NOTE:** `US_ONLY_data` exists only locally in the secondary cluster, so
+you won't be able to see it from the primary cluster.
+
+
+
+
+## Next steps
+
+Read [Vault Deployment Reference
+Architecture](/guides/operations/reference-architecture.html) to learn more
+about the recommended deployment practices.
diff --git a/website/source/guides/operations/reference-architecture.html.md b/website/source/guides/operations/reference-architecture.html.md
index 09edd59cc..02caa0752 100644
--- a/website/source/guides/operations/reference-architecture.html.md
+++ b/website/source/guides/operations/reference-architecture.html.md
@@ -232,6 +232,9 @@ Vault performance replication allows for secrets management across many sites.
Secrets, authentication methods, authorization policies and other details are
replicated to be active and available in multiple locations.
+Refer to the [Vault Mount Filter](/guides/operations/mount-filter.html) guide
+to learn about filtering out secret engines from being replicated across regions.
+
#### Disaster Recovery Replication
Vault disaster recovery replication ensures that a standby Vault cluster is kept
diff --git a/website/source/layouts/guides.erb b/website/source/layouts/guides.erb
index 4f5490d78..c82727912 100644
--- a/website/source/layouts/guides.erb
+++ b/website/source/layouts/guides.erb
@@ -22,6 +22,9 @@
From d60360ddbe7fcfbbd98e11806bacb1a51bd3b14b Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Tue, 22 May 2018 14:07:07 -0400
Subject: [PATCH 06/39] Add instructions for both kvv1 and kvv2 to getting
started policies info
---
.../intro/getting-started/policies.html.md | 55 +++++++++++++++++--
1 file changed, 49 insertions(+), 6 deletions(-)
diff --git a/website/source/intro/getting-started/policies.html.md b/website/source/intro/getting-started/policies.html.md
index 9dbc777a8..f7b6711b3 100644
--- a/website/source/intro/getting-started/policies.html.md
+++ b/website/source/intro/getting-started/policies.html.md
@@ -27,13 +27,23 @@ Policies are authored in [HCL][hcl], but it is JSON compatible. Here is an
example policy:
```hcl
+# Normal servers have version 1 of KV mounted by default, so will need these
+# paths:
path "secret/*" {
capabilities = ["create"]
}
-
path "secret/foo" {
capabilities = ["read"]
}
+
+# Dev servers have version 2 of KV mounted by default, so will need these
+# paths:
+path "secret/data/*" {
+ capabilities = ["create"]
+}
+path "secret/data/foo" {
+ capabilities = ["read"]
+}
```
With this policy, a user could write any secret to `secret/`, except to
@@ -69,13 +79,23 @@ Here is an example you can copy-paste in the terminal:
```text
$ vault policy write my-policy -<<EOF
+# Normal servers have version 1 of KV mounted by default, so will need these
+# paths:
path "secret/*" {
  capabilities = ["create"]
}
-
path "secret/foo" {
  capabilities = ["read"]
}
+
+# Dev servers have version 2 of KV mounted by default, so will need these
+# paths:
+path "secret/data/*" {
+  capabilities = ["create"]
+}
+path "secret/data/foo" {
+  capabilities = ["read"]
+}
EOF
```
Date: Tue, 22 May 2018 11:23:11 -0700
Subject: [PATCH 07/39] Seal Wrap / FIPS 140-2 Compliance guide (#4558)
* WIP - Seal Wrap guide
* WIP: Seal Wrap guide
* Added a brief description about the Seal Wrap guide
* Incorporated feedbacks
* Updated FIPS language
Technically everything looks great. I've updated some of the language here as "compliance" could be interpreted to mean that golang's crypto and xcrypto libraries have been certified compliant with FIPS. Unfortunately they have not, and Leidos' cert is only about how Vault can operate in tandem with FIPS-certified modules.
It's a very specific update, but it's an important one for some VE customers.
Looks great - thanks!
* Removed 'Compliance' from title
* typo fix
---
.../assets/images/vault-hsm-autounseal.png | 3 +
.../assets/images/vault-seal-wrap-2.png | 3 +
.../assets/images/vault-seal-wrap-3.png | 3 +
.../assets/images/vault-seal-wrap-4.png | 3 +
.../assets/images/vault-seal-wrap-5.png | 3 +
.../assets/images/vault-seal-wrap-6.png | 3 +
.../source/assets/images/vault-seal-wrap.png | 3 +
.../operations/autounseal-aws-kms.html.md | 2 +-
.../source/guides/operations/index.html.md | 6 +-
.../guides/operations/seal-wrap.html.md | 643 ++++++++++++++++++
.../secret-mgmt/db-root-rotation.html.md | 2 +-
website/source/layouts/guides.erb | 7 +-
12 files changed, 676 insertions(+), 5 deletions(-)
create mode 100644 website/source/assets/images/vault-hsm-autounseal.png
create mode 100644 website/source/assets/images/vault-seal-wrap-2.png
create mode 100644 website/source/assets/images/vault-seal-wrap-3.png
create mode 100644 website/source/assets/images/vault-seal-wrap-4.png
create mode 100644 website/source/assets/images/vault-seal-wrap-5.png
create mode 100644 website/source/assets/images/vault-seal-wrap-6.png
create mode 100644 website/source/assets/images/vault-seal-wrap.png
create mode 100644 website/source/guides/operations/seal-wrap.html.md
diff --git a/website/source/assets/images/vault-hsm-autounseal.png b/website/source/assets/images/vault-hsm-autounseal.png
new file mode 100644
index 000000000..e385f7aaa
--- /dev/null
+++ b/website/source/assets/images/vault-hsm-autounseal.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:377a785601201093efea981c0ff77cb8bbdab8e57bdbc247f007bd96501e5ead
+size 42894
diff --git a/website/source/assets/images/vault-seal-wrap-2.png b/website/source/assets/images/vault-seal-wrap-2.png
new file mode 100644
index 000000000..4c0034abd
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap-2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf0bb386dc44b28ad7f82115e558d6bdbae344c284307055696c70586f7489d6
+size 47887
diff --git a/website/source/assets/images/vault-seal-wrap-3.png b/website/source/assets/images/vault-seal-wrap-3.png
new file mode 100644
index 000000000..3383e6d2d
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap-3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae354818a0a29604abbc5f9cd6e45de8fd1e2c2101d19e6bba3ed5c809838035
+size 140731
diff --git a/website/source/assets/images/vault-seal-wrap-4.png b/website/source/assets/images/vault-seal-wrap-4.png
new file mode 100644
index 000000000..ea2b61811
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap-4.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91ebcdbb13faa78bce49bc1eb3a3303324fa3a0dc12672fb242f7606f6e20514
+size 36862
diff --git a/website/source/assets/images/vault-seal-wrap-5.png b/website/source/assets/images/vault-seal-wrap-5.png
new file mode 100644
index 000000000..8fe66a6ee
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap-5.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c709263cf0056bbbd7b60c801701c0fcf1dc26bfa4fd1e0bd0f461233ad16a
+size 54606
diff --git a/website/source/assets/images/vault-seal-wrap-6.png b/website/source/assets/images/vault-seal-wrap-6.png
new file mode 100644
index 000000000..1a98c6ff8
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap-6.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5b69749514a50401dd7d8a706bc3a596ccf1e2618cf091ca162441d92d4837c
+size 54563
diff --git a/website/source/assets/images/vault-seal-wrap.png b/website/source/assets/images/vault-seal-wrap.png
new file mode 100644
index 000000000..bc1d37351
--- /dev/null
+++ b/website/source/assets/images/vault-seal-wrap.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeaea34e1f6529820fc8c3c895897124441418405f224a34ca2da8ceba9ed4ea
+size 47002
diff --git a/website/source/guides/operations/autounseal-aws-kms.html.md b/website/source/guides/operations/autounseal-aws-kms.html.md
index 9d3784b70..3424cd2e9 100644
--- a/website/source/guides/operations/autounseal-aws-kms.html.md
+++ b/website/source/guides/operations/autounseal-aws-kms.html.md
@@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Vault Auto-unseal using AWS KMS - Guides"
-sidebar_current: "guides-autounseal-aws-kms"
+sidebar_current: "guides-operations-autounseal-aws-kms"
description: |-
In this guide, we'll show an example of how to use Terraform to provision an
instance that can utilize an encryption key from AWS Key Management Services
diff --git a/website/source/guides/operations/index.html.md b/website/source/guides/operations/index.html.md
index bcef88fbc..4a922aaf9 100644
--- a/website/source/guides/operations/index.html.md
+++ b/website/source/guides/operations/index.html.md
@@ -36,12 +36,16 @@ is a Vault Enterprise feature.
- **[Enterprise Only]** [Mount Filter](/guides/operations/mount-filter.html)
guide demonstrates how to selectively filter out secret engines from being
replicated across clusters. This feature can help organizations to comply with
-***General Data Protection Regulation (GDPR)***.
+***General Data Protection Regulation (GDPR)***.
- **[Enterprise Only]** [Vault Auto-unseal using AWS Key Management Service (KMS)](/guides/operations/autounseal-aws-kms.html) guide demonstrates an example of
how to use Terraform to provision an instance that utilizes an encryption key
from AWS Key Management Service (KMS).
+- **[Enterprise Only]** [Seal Wrap / FIPS 140-2](/guides/operations/seal-wrap.html)
+guide demonstrates how Vault's seal wrap feature works to encrypt your secrets
+leveraging a FIPS 140-2 certified HSM.
+
- [Root Token Generation](/guides/operations/generate-root.html) guide
demonstrates the workflow of regenerating root tokens. It is considered to be a
best practice not to persist the initial **root** token. If a root token needs
diff --git a/website/source/guides/operations/seal-wrap.html.md b/website/source/guides/operations/seal-wrap.html.md
new file mode 100644
index 000000000..b3c70b7a0
--- /dev/null
+++ b/website/source/guides/operations/seal-wrap.html.md
@@ -0,0 +1,643 @@
+---
+layout: "guides"
+page_title: "Seal Wrap / FIPS 140-2 - Guides"
+sidebar_current: "guides-operations-seal-wrap"
+description: |-
+  This guide demonstrates how Vault's Seal Wrap feature works to encrypt your
+  secrets, leveraging a FIPS 140-2 certified HSM.
+---
+
+
+# Seal Wrap / FIPS 140-2
+
+~> **Enterprise Only:** Vault's HSM auto-unseal and Seal Wrap features are a
+part of _Vault Enterprise_.
+
+***Vault Enterprise*** integrates with [HSM platforms](/docs/enterprise/hsm/index.html)
+to opt in to automatic [unsealing](/docs/concepts/seal.html#unsealing).
+HSM integration provides three pieces of special functionality:
+
+- **Master Key Wrapping**: Vault protects its master key by transiting it through
+the HSM for encryption rather than splitting into key shares
+- **Automatic Unsealing**: Vault stores its encrypted master key in storage,
+allowing for automatic unsealing
+- **Seal Wrapping**: Provides FIPS KeyStorage-conforming functionality for Critical Security Parameters
+
+![Unseal with HSM](/assets/images/vault-hsm-autounseal.png)
+
+In some large organizations, there is a fair amount of complexity in designating
+key officers who must be available to unseal Vault installations, as the most
+common pattern is to deploy Vault immutably. As such, automating unseal using an
+HSM provides a simplified yet secure way of unsealing Vault nodes as they get
+deployed.
+
+Vault pulls its encrypted master key from storage and transits it through the
+HSM for decryption via the **PKCS \#11 API**. Once the master key is decrypted,
+Vault uses it to decrypt the encryption key and resume normal Vault
+operations.
+
+
+## Reference Material
+
+- [HashiCorp + AWS: Integrating CloudHSM with Vault Enterprise](https://www.hashicorp.com/resources/hashicorp-and-aws-integrating-cloudhsm-with-vault-e) webinar
+- [Seal Wrap documentation](/docs/enterprise/sealwrap/index.html)
+- [Vault Configuration - pkcs11 Seal](/docs/configuration/seal/pkcs11.html)
+- [Vault Enterprise HSM Support](/docs/enterprise/hsm/index.html)
+- [NIST SC-12: Cryptographic Key Establishment and Management](https://nvd.nist.gov/800-53/Rev4/control/SC-12)
+- [NIST SC-13: Cryptographic Protection](https://nvd.nist.gov/800-53/Rev4/control/SC-13)
+
+
+## Estimated Time to Complete
+
+10 minutes
+
+
+## Challenge
+
+The Federal Information Processing Standard (FIPS) 140-2 is a U.S. Government
+computer security standard used to accredit cryptography modules. If your
+product or service does not follow FIPS' security requirements, it may
+complicate your ability to operate with U.S. Government data.
+
+Aside from doing business with the U.S. government, your organization may care
+about FIPS because it approves various cryptographic ciphers for hashing,
+signatures, key exchange, and encryption.
+
+
+## Solution
+
+Integrate Vault with FIPS 140-2 certified HSM and enable the ***Seal Wrap***
+feature to protect your data.
+
+Vault encrypts secrets using 256-bit AES in GCM mode with a randomly generated
+nonce prior to writing them to its persistent storage. By enabling seal wrap,
+Vault wraps your secrets with **an extra layer of encryption** leveraging the
+HSM encryption and decryption.
+
+![Seal Wrap](/assets/images/vault-seal-wrap.png)
+
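+For example (a sketch only; the actual steps are covered later in this guide),
+seal wrapping is enabled per mount by passing the `seal_wrap` parameter when a
+secret engine is mounted:
+
+```shell
+$ tee payload.json <<EOF
+{
+  "type": "kv",
+  "seal_wrap": true
+}
+EOF
+
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       http://127.0.0.1:8200/v1/sys/mounts/kv-seal-wrapped
+```
+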
+#### Benefits of the Seal Wrap:
+
+- Conformance with FIPS 140-2 directives on Key Storage and Key Transport as [certified by Leidos](/docs/enterprise/sealwrap/index.html#fips-140-2-compliance)
+- Supports FIPS level of security equal to HSM
+ * For example, if you use Level 3 hardware encryption on an HSM, Vault will be
+ using FIPS 140-2 Level 3 cryptography
+- Allows Vault to be deployed in high security [GRC](https://en.wikipedia.org/wiki/Governance,_risk_management,_and_compliance)
+environments (e.g. PCI-DSS, HIPAA) where FIPS guidelines are important for external audits
+- Pathway for Vault's use in managing the Department of Defense's (DOD) or North
+Atlantic Treaty Organization (NATO) military secrets
+
+
+## Prerequisites
+
+This intermediate operations guide assumes that you have:
+
+- A [supported HSM](/docs/enterprise/hsm/index.html) cluster to be integrated
+ with Vault
+- Vault Enterprise Premium
+
+
+
+## Steps
+
+This guide walks you through the following steps:
+
+1. [Configure HSM Auto-unseal](#step1)
+1. [Enable Seal Wrap](#step2)
+1. [Test the Seal Wrap Feature](#step3)
+
+
+
+### Step 1: Configure HSM Auto-unseal
+
+When a Vault server is started, it normally starts in a sealed state where a
+quorum of existing unseal keys is required to unseal it. By integrating Vault
+with HSM, your Vault server can be automatically unsealed by the trusted HSM key
+provider.
+
+#### Task 1: Write a Vault configuration file
+
+To integrate your Vault Enterprise server with an HSM cluster, the configuration
+file must define the [`PKCS11 seal` stanza](/docs/configuration/seal/pkcs11.html)
+providing the necessary connection information.
+
+
+**Example: `config-hsm.hcl`**
+
+```shell
+# Provide your AWS CloudHSM cluster connection information
+seal "pkcs11" {
+ lib = "/opt/cloudhsm/lib/libcloudhsm_pkcs11.so"
+ slot = "1"
+ pin = "vault:Password1"
+ key_label = "hsm_demo"
+ hmac_key_label = "hsm_hmac_demo"
+ generate_key = "true"
+}
+
+# Configure the storage backend for Vault
+storage "file" {
+ path = "/tmp/vault"
+}
+
+# Addresses and ports on which Vault will respond to requests
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_disable = 1
+}
+
+ui = true
+```
+
+> **NOTE:** For the purpose of this guide, the storage backend is set to the
+local file system (`/tmp/vault`) to make the verification step easy.
+
+The example configuration defines the following in its **`seal`** stanza:
+
+- **`lib`** is set to the path to the PKCS \#11 library on the virtual machine
+ where Vault Enterprise is installed
+- **`slot`** should be set to the slot number to use
+- **`pin`** is the PKCS \#11 PIN for login
+- **`key_label`** defines the label of the key you want to use
+- **`hmac_key_label`** defines the label of the key you want to use for HMACing.
+ (NOTE: HMAC is optional and only used for mechanisms that do not support
+ authenticated data.)
+- **`generate_key`** is set to `true`. If no existing key with the label
+ specified by `key_label` can be found at Vault initialization time, Vault
+ generates a key
+
+~> **IMPORTANT:** Having Vault generate its own key is the easiest way to get up
+and running, but for security, Vault marks the key as **non-exportable**. If
+your HSM key backup strategy requires the key to be exportable, you should
+generate the key yourself. Refer to the [key generation attributes](/docs/configuration/seal/pkcs11.html#vault-key-generation-attributes).
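+
+If you do pre-generate the key, a generic PKCS \#11 client such as OpenSC's
+`pkcs11-tool` can create an AES key on the HSM. The following is a minimal
+sketch only; the module path, PIN, and labels are taken from the example above,
+and your HSM vendor may ship its own key management utility instead.
+
+```shell
+# Generate a 256-bit AES key with the label Vault expects (illustrative;
+# exact flags and values depend on your HSM and PKCS #11 client)
+$ pkcs11-tool --module /opt/cloudhsm/lib/libcloudhsm_pkcs11.so \
+    --login --pin Password1 \
+    --keygen --key-type AES:32 \
+    --label hsm_demo
+
+# List secret keys to confirm the key exists before starting Vault
+$ pkcs11-tool --module /opt/cloudhsm/lib/libcloudhsm_pkcs11.so \
+    --login --pin Password1 --list-objects --type secrkey
+```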
+
+
+
+
+#### Task 2: Initialize your Vault Enterprise server
+
+Start the Vault server with your Vault configuration file. For example, if your
+configuration file is located at `/home/ec2-user/config-hsm.hcl`, the command
+would look like:
+
+```plaintext
+$ vault server -config=/home/ec2-user/config-hsm.hcl
+
+ SDK Version: 2.03
+==> Vault server configuration:
+
+ HSM PKCS#11 Version: 2.40
+ HSM Library: Cavium PKCS#11 Interface
+ HSM Library Version: 1.0
+ HSM Manufacturer ID: Cavium Networks
+ HSM Type: pkcs11
+ Cgo: enabled
+ Listener 1: tcp (addr: "127.0.0.1:8200", cluster address: "127.0.0.1:8201", tls: "disabled")
+ Log Level: info
+ Mlock: supported: true, enabled: false
+ Storage: file
+ Version: Vault v0.10.1+ent.hsm
+ Version Sha: 0e628142d6b6e5cabfdb9680a6d669d38f15574f
+
+==> Vault server started! Log data will stream in below:
+```
+
+
+
+In another terminal, set the `VAULT_ADDR` environment variable, and
+[initialize](/intro/getting-started/deploy.html#initializing-the-vault) your
+Vault server.
+
+**Example:**
+
+```shell
+# Set the VAULT_ADDR environment variable
+$ export VAULT_ADDR="http://127.0.0.1:8200"
+
+# Initialize Vault
+$ vault operator init
+
+Recovery Key 1: 2bU2wOfmyMqYcsEYo4Mo9q4s/KAODgHHjcmZmFOo+XY=
+Initial Root Token: 8d726c6b-98ba-893f-23d5-be3d2fec480e
+
+Success! Vault is initialized
+
+Recovery key initialized with 1 key shares and a key threshold of 1. Please
+securely distribute the key shares printed above.
+```
+
+Only a single master key is created; it is encrypted by the HSM using
+PKCS \#11 and then placed in the storage. When Vault needs to be unsealed, it
+retrieves the HSM-encrypted master key from the storage and round-trips it
+through the HSM to decrypt the master key.
+
+~> **NOTE:** When Vault is initialized while using an HSM, rather than unseal
+keys being returned to the operator, **recovery keys** are returned. These are
+generated from an internal recovery key that is [split via Shamir's Secret
+Sharing](/docs/enterprise/hsm/behavior.html#initialization), similar to Vault's
+treatment of unseal keys when running without an HSM. Some Vault operations such
+as generation of a root token require these recovery keys.
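+
+For example, generating a new root token with a recovery key might look like
+the following sketch, where the nonce, OTP, and encoded token are placeholders
+produced during the flow:
+
+```shell
+# Start a root token generation; this returns a nonce and a One-Time Password (OTP)
+$ vault operator generate-root -init
+
+# Provide a recovery key share (repeat until the threshold is met)
+$ vault operator generate-root -nonce=<nonce>
+
+# Decode the resulting encoded token using the OTP from the -init step
+$ vault operator generate-root -decode=<encoded_token> -otp=<otp>
+```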
+
+To verify, log in to Vault using the initial root token from the output above.
+
+```plaintext
+$ vault login 8d726c6b-98ba-893f-23d5-be3d2fec480e
+```
+
+#### Task 3: Verification
+
+Stop and restart the Vault server and then verify its status:
+
+```plaintext
+$ vault status
+
+Key Value
+--- -----
+Recovery Seal Type shamir
+Sealed false
+Total Recovery Shares 1
+Threshold 1
+Version 0.10.1+ent.hsm
+Cluster Name vault-cluster-80556565
+Cluster ID 40316cdd-3d42-ec36-e7b0-6a7a0684568c
+HA Enabled false
+```
+
+The `Sealed` status is **`false`**, which means that Vault was automatically
+unsealed upon its start. You can proceed with Vault operations.
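+
+You can also confirm the seal status through the API; a quick check against the
+`sys/seal-status` endpoint, assuming the address used in this guide:
+
+```shell
+# The sealed field reports false after a successful auto-unseal
+$ curl http://127.0.0.1:8200/v1/sys/seal-status | jq '.sealed'
+false
+```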
+
+
+### Step 2: Enable Seal Wrap
+
+-> **NOTE:** For FIPS 140-2 compliance, seal wrap requires a FIPS
+140-2 certified HSM, which is supported by _Vault Enterprise Premium_.
+
+Seal wrapping is **always enabled** for some values, including the recovery
+key, any stored key shares, the master key, and the keyring. When working with
+the key/value secret engine, you can also enable seal wrap to wrap all data.
+
+
+#### CLI command
+
+Check the enabled secret engines.
+
+```plaintext
+$ vault secrets list -format=json
+{
+ ...
+ "secret/": {
+ "type": "kv",
+ "description": "key/value secret storage",
+ "accessor": "kv_75820543",
+ "config": {
+ "default_lease_ttl": 0,
+ "max_lease_ttl": 0,
+ "force_no_cache": false
+ },
+ "options": {
+ "version": "1"
+ },
+ "local": false,
+ "seal_wrap": false
+ },
+ ...
+```
+
+Notice that the `seal_wrap` parameter is set to **`false`**.
+
+> For the purpose of comparing seal wrapped data against unwrapped data, enable
+an additional key/value secret engine at the `secret2/` path.
+
+```shell
+# Pass the '-seal-wrap' flag when you enable the KV workflow
+$ vault secrets enable -path=secret2/ -version=1 -seal-wrap kv
+```
+
+The above command enables [key/value version 1](/docs/secrets/kv/kv-v1.html)
+with the seal wrap feature enabled.
+
+```plaintext
+$ vault secrets list -format=json
+{
+ ...
+ "secret2/": {
+ "type": "kv",
+ "description": "",
+ "accessor": "kv_bdd74241",
+ "config": {
+ "default_lease_ttl": 0,
+ "max_lease_ttl": 0,
+ "force_no_cache": false
+ },
+ "options": {
+ "version": "1"
+ },
+ "local": false,
+ "seal_wrap": true
+ },
+ ...
+```
+
+Notice that the `seal_wrap` parameter is set to **`true`** at `secret2/`.
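+
+To check just this flag, you can filter the JSON output; a small sketch using
+`jq`:
+
+```shell
+# Print only the seal_wrap setting for the secret2/ mount
+$ vault secrets list -format=json | jq -r '."secret2/".seal_wrap'
+true
+```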
+
+
+#### API call using cURL
+
+Check the enabled secret engines.
+
+```plaintext
+$ curl --header "X-Vault-Token: ..." \
+ http://127.0.0.1:8200/v1/sys/mounts | jq
+...
+ "secret/": {
+ "accessor": "kv_f05b8b9c",
+ "config": {
+ "default_lease_ttl": 0,
+ "force_no_cache": false,
+ "max_lease_ttl": 0,
+ "plugin_name": ""
+ },
+ "description": "key/value secret storage",
+ "local": false,
+ "options": {
+ "version": "2"
+ },
+ "seal_wrap": false,
+ "type": "kv"
+ },
+...
+```
+
+Notice that the `seal_wrap` parameter is set to **`false`**.
+
+> For the purpose of comparing seal wrapped data against unwrapped data, enable
+an additional key/value secret engine at the `secret2/` path.
+
+```shell
+# Set the seal_wrap parameter to true in the request payload
+$ tee payload.json <<EOF
+{
+  "type": "kv",
+  "options": {
+    "version": "1"
+  },
+  "seal_wrap": true
+}
+EOF
+
+# Enable a key/value secret engine at secret2/ with seal wrap enabled
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       http://127.0.0.1:8200/v1/sys/mounts/secret2
+```
+
+#### Web UI
+
+Open a web browser and launch the Vault UI (e.g. http://127.0.0.1:8200/ui),
+and then log in.
+
+> For the purpose of comparing seal wrapped data against unwrapped data, enable
+an additional key/value secret engine at the `secret2/` path.
+Select **Enable new engine**.
+
+- Enter **`secret2`** in the path field
+- Select **Version 1** for KV version
+- Select the check box for **Seal Wrap**
+
+![Enable Secret Engine](/assets/images/vault-seal-wrap-3.png)
+
+Click **Enable Engine**.
+
+
+### Step 3: Test the Seal Wrap Feature
+
+In this step, you are going to:
+
+1. Write some test data
+1. [View the encrypted secrets](#view-the-encrypted-secrets)
+
+#### CLI command
+
+Write a secret at `secret/unwrapped`.
+
+```shell
+# Write a key named 'password' with its value 'my-long-password'
+$ vault kv put secret/unwrapped password="my-long-password"
+
+# Read the path to verify
+$ vault kv get secret/unwrapped
+====== Data ======
+Key Value
+--- -----
+password my-long-password
+```
+
+Write the same secret at `secret2/wrapped`.
+
+```shell
+# Write a key named 'password' with its value 'my-long-password'
+$ vault kv put secret2/wrapped password="my-long-password"
+
+# Read the path to verify
+$ vault kv get secret2/wrapped
+====== Data ======
+Key Value
+--- -----
+password my-long-password
+```
+
+Using a valid token, you can write and read secrets the same way regardless of
+whether seal wrap is enabled.
+
+
+
+#### API call using cURL
+
+Write a secret at `secret/unwrapped`.
+
+```shell
+# Create a payload
+$ tee payload.json <<EOF
+{
+  "password": "my-long-password"
+}
+EOF
+
+# Write the secret at secret/unwrapped
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       http://127.0.0.1:8200/v1/secret/unwrapped
+
+# Write the same secret at secret2/wrapped
+$ curl --header "X-Vault-Token: ..." \
+       --request POST \
+       --data @payload.json \
+       http://127.0.0.1:8200/v1/secret2/wrapped
+```
+
+> When Vault's Seal Wrap feature is used with a FIPS 140-2 certified HSM, Vault
+will store Critical Security Parameters (CSPs) in a manner that is compliant
+with KeyStorage and KeyTransit requirements.
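+
+#### View the encrypted secrets
+
+To see the effect on disk, you can inspect the file storage backend directly.
+A minimal sketch, assuming the `/tmp/vault` path from the example
+configuration; the UUID directory names and file layout will differ in your
+environment:
+
+```shell
+# Each mounted secret engine stores its data under a UUID directory
+$ ls /tmp/vault/logical/
+
+# Both entries are ciphertext, but the entry under the seal wrapped mount
+# carries an extra layer of HSM encryption
+$ cat /tmp/vault/logical/<secret_uuid>/_unwrapped
+$ cat /tmp/vault/logical/<secret2_uuid>/_wrapped
+```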
+
+
+
+## Next steps
+
+This guide used the local file system as the storage backend to keep it simple.
+To learn more about making your Vault cluster highly available, read the [Vault
+HA with Consul](/guides/operations/vault-ha-consul.html) guide.
diff --git a/website/source/guides/secret-mgmt/db-root-rotation.html.md b/website/source/guides/secret-mgmt/db-root-rotation.html.md
index 72d5f103c..973ae0369 100644
--- a/website/source/guides/secret-mgmt/db-root-rotation.html.md
+++ b/website/source/guides/secret-mgmt/db-root-rotation.html.md
@@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "DB Root Credential Rotation - Guides"
-sidebar_current: "guides-secret-db-root-rotation"
+sidebar_current: "guides-secret-mgmt-db-root-rotation"
description: |-
Vault enables the combined database secret engines to automate the rotation of
root credentials.
diff --git a/website/source/layouts/guides.erb b/website/source/layouts/guides.erb
index c82727912..ec892fa41 100644
--- a/website/source/layouts/guides.erb
+++ b/website/source/layouts/guides.erb
@@ -25,9 +25,12 @@
{{#if (and (not-eq model.id "default") capabilities.canDelete)}}
{{#confirm-action
- buttonClasses="button is-link is-outlined is-inverted"
+ buttonClasses="button is-ghost"
onConfirmAction=(action "deletePolicy" model)
confirmMessage=(concat "Are you sure you want to delete " model.id "?")
data-test-policy-delete=true
diff --git a/ui/tests/acceptance/access/identity/_shared-alias-tests.js b/ui/tests/acceptance/access/identity/_shared-alias-tests.js
new file mode 100644
index 000000000..284c50f7e
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/_shared-alias-tests.js
@@ -0,0 +1,77 @@
+import page from 'vault/tests/pages/access/identity/aliases/add';
+import aliasIndexPage from 'vault/tests/pages/access/identity/aliases/index';
+import aliasShowPage from 'vault/tests/pages/access/identity/aliases/show';
+import createItemPage from 'vault/tests/pages/access/identity/create';
+import showItemPage from 'vault/tests/pages/access/identity/show';
+
+export const testAliasCRUD = (name, itemType, assert) => {
+ let itemID;
+ let aliasID;
+ if (itemType === 'groups') {
+ createItemPage.createItem(itemType, 'external');
+ } else {
+ createItemPage.createItem(itemType);
+ }
+ andThen(() => {
+ let idRow = showItemPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0];
+ itemID = idRow.rowValue;
+ page.visit({ item_type: itemType, id: itemID });
+ });
+ page.editForm.name(name).submit();
+ andThen(() => {
+ let idRow = aliasShowPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0];
+ aliasID = idRow.rowValue;
+ assert.equal(
+ currentRouteName(),
+ 'vault.cluster.access.identity.aliases.show',
+ 'navigates to the correct route'
+ );
+    assert.ok(
+      aliasShowPage.flashMessage.latestMessage.startsWith('Successfully saved'),
+      `${itemType}: shows a flash message`
+    );
+ assert.ok(aliasShowPage.nameContains(name), `${itemType}: renders the name on the show page`);
+ });
+
+ aliasIndexPage.visit({ item_type: itemType });
+ andThen(() => {
+ assert.equal(aliasIndexPage.items.filterBy('id', aliasID).length, 1, `${itemType}: lists the entity in the entity list`);
+ aliasIndexPage.items.filterBy('id', aliasID)[0].menu();
+ });
+ aliasIndexPage.delete().confirmDelete();
+
+ andThen(() => {
+ assert.equal(aliasIndexPage.items.filterBy('id', aliasID).length, 0, `${itemType}: the row is deleted`);
+    assert.ok(
+      aliasIndexPage.flashMessage.latestMessage.startsWith('Successfully deleted'),
+      `${itemType}: shows flash message`
+    );
+ });
+};
+
+export const testAliasDeleteFromForm = (name, itemType, assert) => {
+ let itemID;
+ let aliasID;
+ if (itemType === 'groups') {
+ createItemPage.createItem(itemType, 'external');
+ } else {
+ createItemPage.createItem(itemType);
+ }
+ andThen(() => {
+ let idRow = showItemPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0];
+ itemID = idRow.rowValue;
+ page.visit({ item_type: itemType, id: itemID });
+ });
+ page.editForm.name(name).submit();
+ andThen(() => {
+ let idRow = aliasShowPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0];
+ aliasID = idRow.rowValue;
+ });
+ aliasShowPage.edit();
+
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.aliases.edit', `${itemType}: navigates to edit on create`);
+ });
+ page.editForm.delete().confirmDelete();
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.aliases.index', `${itemType}: navigates to list page on delete`);
+ assert.equal(aliasIndexPage.items.filterBy('id', aliasID).length, 0, `${itemType}: the row does not show in the list`);
+    assert.ok(
+      aliasIndexPage.flashMessage.latestMessage.startsWith('Successfully deleted'),
+      `${itemType}: shows flash message`
+    );
+ });
+};
diff --git a/ui/tests/acceptance/access/identity/_shared-tests.js b/ui/tests/acceptance/access/identity/_shared-tests.js
new file mode 100644
index 000000000..d91409ea5
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/_shared-tests.js
@@ -0,0 +1,51 @@
+import page from 'vault/tests/pages/access/identity/create';
+import showPage from 'vault/tests/pages/access/identity/show';
+import indexPage from 'vault/tests/pages/access/identity/index';
+
+export const testCRUD = (name, itemType, assert) => {
+ let id;
+ page.visit({ item_type: itemType });
+ page.editForm.name(name).submit();
+ andThen(() => {
+ let idRow = showPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0];
+ id = idRow.rowValue;
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.show', `${itemType}: navigates to show on create`);
+    assert.ok(
+      showPage.flashMessage.latestMessage.startsWith('Successfully saved'),
+      `${itemType}: shows a flash message`
+    );
+ assert.ok(showPage.nameContains(name), `${itemType}: renders the name on the show page`);
+ });
+
+ indexPage.visit({ item_type: itemType });
+ andThen(() => {
+ assert.equal(indexPage.items.filterBy('id', id).length, 1, `${itemType}: lists the entity in the entity list`);
+ indexPage.items.filterBy('id', id)[0].menu();
+ });
+ indexPage.delete().confirmDelete();
+
+ andThen(() => {
+ assert.equal(indexPage.items.filterBy('id', id).length, 0, `${itemType}: the row is deleted`);
+    assert.ok(
+      indexPage.flashMessage.latestMessage.startsWith('Successfully deleted'),
+      `${itemType}: shows flash message`
+    );
+ });
+};
+
+
+export const testDeleteFromForm = (name, itemType, assert) => {
+ let id;
+ page.visit({ item_type: itemType });
+ page.editForm.name(name).submit();
+ andThen(() => {
+ id = showPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0].rowValue
+ });
+ showPage.edit();
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.edit', `${itemType}: navigates to edit on create`);
+ });
+ page.editForm.delete().confirmDelete();
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.index', `${itemType}: navigates to list page on delete`);
+ assert.equal(indexPage.items.filterBy('id', id).length, 0, `${itemType}: the row does not show in the list`);
+    assert.ok(
+      indexPage.flashMessage.latestMessage.startsWith('Successfully deleted'),
+      `${itemType}: shows flash message`
+    );
+ });
+};
+
diff --git a/ui/tests/acceptance/access/identity/entities/aliases/create-test.js b/ui/tests/acceptance/access/identity/entities/aliases/create-test.js
new file mode 100644
index 000000000..81e9e2b51
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/entities/aliases/create-test.js
@@ -0,0 +1,21 @@
+import { test } from 'qunit';
+import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
+import { testAliasCRUD, testAliasDeleteFromForm } from '../../_shared-alias-tests';
+
+moduleForAcceptance('Acceptance | /access/identity/entities/aliases/add', {
+ beforeEach() {
+ return authLogin();
+ },
+});
+
+
+test('it allows create, list, delete of an entity alias', function(assert) {
+ let name = `alias-${Date.now()}`;
+ testAliasCRUD(name, 'entities', assert);
+});
+
+test('it allows delete from the edit form', function(assert) {
+ let name = `alias-${Date.now()}`;
+ testAliasDeleteFromForm(name, 'entities', assert);
+});
+
diff --git a/ui/tests/acceptance/access/identity/entities/create-test.js b/ui/tests/acceptance/access/identity/entities/create-test.js
new file mode 100644
index 000000000..12b064229
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/entities/create-test.js
@@ -0,0 +1,32 @@
+import { test } from 'qunit';
+import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
+import page from 'vault/tests/pages/access/identity/create';
+import { testCRUD, testDeleteFromForm } from '../_shared-tests';
+
+moduleForAcceptance('Acceptance | /access/identity/entities/create', {
+ beforeEach() {
+ return authLogin();
+ },
+});
+
+test('it visits the correct page', function(assert) {
+ page.visit({ item_type: 'entities' });
+ andThen(() => {
+ assert.equal(
+ currentRouteName(),
+ 'vault.cluster.access.identity.create',
+ 'navigates to the correct route'
+ );
+ });
+});
+
+test('it allows create, list, delete of an entity', function(assert) {
+ let name = `entity-${Date.now()}`;
+ testCRUD(name, 'entities', assert);
+});
+
+test('it can be deleted from the edit form', function(assert) {
+ let name = `entity-${Date.now()}`;
+ testDeleteFromForm(name, 'entities', assert);
+});
+
diff --git a/ui/tests/acceptance/access/identity/entities/index-test.js b/ui/tests/acceptance/access/identity/entities/index-test.js
new file mode 100644
index 000000000..e5190868d
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/entities/index-test.js
@@ -0,0 +1,23 @@
+import { test } from 'qunit';
+import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
+import page from 'vault/tests/pages/access/identity/index';
+
+moduleForAcceptance('Acceptance | /access/identity/entities', {
+ beforeEach() {
+ return authLogin();
+ },
+});
+
+test('it renders the entities page', function(assert) {
+ page.visit({ item_type: 'entities' });
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.index', 'navigates to the correct route');
+ });
+});
+
+test('it renders the groups page', function(assert) {
+ page.visit({ item_type: 'groups' });
+ andThen(() => {
+ assert.equal(currentRouteName(), 'vault.cluster.access.identity.index', 'navigates to the correct route');
+ });
+});
diff --git a/ui/tests/acceptance/access/identity/groups/aliases/create-test.js b/ui/tests/acceptance/access/identity/groups/aliases/create-test.js
new file mode 100644
index 000000000..d40f94e90
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/groups/aliases/create-test.js
@@ -0,0 +1,21 @@
+import { test } from 'qunit';
+import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
+import { testAliasCRUD, testAliasDeleteFromForm } from '../../_shared-alias-tests';
+
+moduleForAcceptance('Acceptance | /access/identity/groups/aliases/add', {
+ beforeEach() {
+ return authLogin();
+ },
+});
+
+
+test('it allows create, list, delete of an entity alias', function(assert) {
+ let name = `alias-${Date.now()}`;
+ testAliasCRUD(name, 'groups', assert);
+});
+
+test('it allows delete from the edit form', function(assert) {
+ let name = `alias-${Date.now()}`;
+ testAliasDeleteFromForm(name, 'groups', assert);
+});
+
diff --git a/ui/tests/acceptance/access/identity/groups/create-test.js b/ui/tests/acceptance/access/identity/groups/create-test.js
new file mode 100644
index 000000000..484f623d8
--- /dev/null
+++ b/ui/tests/acceptance/access/identity/groups/create-test.js
@@ -0,0 +1,31 @@
+import { test } from 'qunit';
+import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
+import page from 'vault/tests/pages/access/identity/create';
+import { testCRUD, testDeleteFromForm } from '../_shared-tests';
+
+moduleForAcceptance('Acceptance | /access/identity/groups/create', {
+ beforeEach() {
+ return authLogin();
+ },
+});
+
+test('it visits the correct page', function(assert) {
+ page.visit({ item_type: 'groups' });
+ andThen(() => {
+ assert.equal(
+ currentRouteName(),
+ 'vault.cluster.access.identity.create',
+ 'navigates to the correct route'
+ );
+ });
+});
+
+test('it allows create, list, delete of a group', function(assert) {
+ let name = `group-${Date.now()}`;
+ testCRUD(name, 'groups', assert);
+});
+
+test('it can be deleted from the group edit form', function(assert) {
+ let name = `group-${Date.now()}`;
+ testDeleteFromForm(name, 'groups', assert);
+});
diff --git a/ui/tests/acceptance/access/identity/index-test.js b/ui/tests/acceptance/access/identity/index-test.js
deleted file mode 100644
index ed8659a65..000000000
--- a/ui/tests/acceptance/access/identity/index-test.js
+++ /dev/null
@@ -1,16 +0,0 @@
-import { test } from 'qunit';
-import moduleForAcceptance from 'vault/tests/helpers/module-for-acceptance';
-import page from 'vault/tests/pages/access/identity/index';
-
-moduleForAcceptance('Acceptance | /access/identity/entities', {
- beforeEach() {
- return authLogin();
- },
-});
-
-test('it renders the page', function(assert) {
- page.visit({ item_type: 'entities' });
- andThen(() => {
- assert.ok(currentRouteName(), 'vault.cluster.access.identity.index', 'navigates to the correct route');
- });
-});
diff --git a/ui/tests/acceptance/enterprise-replication-test.js b/ui/tests/acceptance/enterprise-replication-test.js
index 06b9d3302..8489bc19d 100644
--- a/ui/tests/acceptance/enterprise-replication-test.js
+++ b/ui/tests/acceptance/enterprise-replication-test.js
@@ -87,7 +87,9 @@ test('replication', function(assert) {
find('[data-test-mount-config-mode]').text().trim().toLowerCase().includes(mode),
'show page renders the correct mode'
);
- assert.dom('[data-test-mount-config-paths]').hasText(mountPath, 'show page renders the correct mount path');
+ assert
+ .dom('[data-test-mount-config-paths]')
+ .hasText(mountPath, 'show page renders the correct mount path');
});
// click edit
@@ -101,10 +103,12 @@ test('replication', function(assert) {
`/vault/replication/performance/secondaries`,
'redirects to the secondaries page'
);
- assert.dom('[data-test-flash-message-body]:contains(The performance mount filter)').hasText(
- `The performance mount filter config for the secondary ${secondaryName} was successfully deleted.`,
- 'renders success flash upon deletion'
- );
+ assert
+ .dom('[data-test-flash-message-body]:contains(The performance mount filter)')
+ .hasText(
+ `The performance mount filter config for the secondary ${secondaryName} was successfully deleted.`,
+ 'renders success flash upon deletion'
+ );
click('[data-test-flash-message-body]:contains(The performance mount filter)');
});
@@ -149,10 +153,9 @@ test('replication', function(assert) {
});
click('[data-test-replication-link="secondaries"]');
andThen(() => {
- assert.dom('[data-test-secondary-name]').hasText(
- secondaryName,
- 'it displays the secondary in the list of known secondaries'
- );
+ assert
+ .dom('[data-test-secondary-name]')
+ .hasText(secondaryName, 'it displays the secondary in the list of known secondaries');
});
// disable dr replication
diff --git a/ui/tests/acceptance/leases-test.js b/ui/tests/acceptance/leases-test.js
index 8b01af3d1..1d479fe8c 100644
--- a/ui/tests/acceptance/leases-test.js
+++ b/ui/tests/acceptance/leases-test.js
@@ -51,7 +51,9 @@ test('it renders the show page', function(assert) {
'vault.cluster.access.leases.show',
'a lease for the secret is in the list'
);
- assert.dom('[data-test-lease-renew-picker]').doesNotExist('non-renewable lease does not render a renew picker');
+ assert
+ .dom('[data-test-lease-renew-picker]')
+ .doesNotExist('non-renewable lease does not render a renew picker');
});
});
@@ -65,7 +67,9 @@ skip('it renders the show page with a picker', function(assert) {
'vault.cluster.access.leases.show',
'a lease for the secret is in the list'
);
- assert.dom('[data-test-lease-renew-picker]').exists({ count: 1 }, 'renewable lease renders a renew picker');
+ assert
+ .dom('[data-test-lease-renew-picker]')
+ .exists({ count: 1 }, 'renewable lease renders a renew picker');
});
});
@@ -84,7 +88,9 @@ test('it removes leases upon revocation', function(assert) {
click(`[data-test-lease-link="${this.enginePath}/"]`);
click('[data-test-lease-link="data/"]');
andThen(() => {
- assert.dom(`[data-test-lease-link="${this.enginePath}/data/${this.name}/"]`).doesNotExist('link to the lease was removed with revocation');
+ assert
+ .dom(`[data-test-lease-link="${this.enginePath}/data/${this.name}/"]`)
+ .doesNotExist('link to the lease was removed with revocation');
});
});
@@ -99,16 +105,17 @@ test('it removes branches when a prefix is revoked', function(assert) {
'vault.cluster.access.leases.list-root',
'it navigates back to the leases root on revocation'
);
- assert.dom(`[data-test-lease-link="${this.enginePath}/"]`).doesNotExist('link to the prefix was removed with revocation');
+ assert
+ .dom(`[data-test-lease-link="${this.enginePath}/"]`)
+ .doesNotExist('link to the prefix was removed with revocation');
});
});
test('lease not found', function(assert) {
visit('/vault/access/leases/show/not-found');
andThen(() => {
- assert.dom('[data-test-lease-error]').hasText(
- 'not-found is not a valid lease ID',
- 'it shows an error when the lease is not found'
- );
+ assert
+ .dom('[data-test-lease-error]')
+ .hasText('not-found is not a valid lease ID', 'it shows an error when the lease is not found');
});
});
diff --git a/ui/tests/acceptance/policies-acl-old-test.js b/ui/tests/acceptance/policies-acl-old-test.js
index 9d161a6af..65195689c 100644
--- a/ui/tests/acceptance/policies-acl-old-test.js
+++ b/ui/tests/acceptance/policies-acl-old-test.js
@@ -46,7 +46,9 @@ test('policies', function(assert) {
});
click('[data-test-policy-list-link]');
andThen(function() {
- assert.dom(`[data-test-policy-link="${policyLower}"]`).exists({ count: 1 }, 'new policy shown in the list');
+ assert
+ .dom(`[data-test-policy-link="${policyLower}"]`)
+ .exists({ count: 1 }, 'new policy shown in the list');
});
// policy deletion
@@ -56,7 +58,9 @@ test('policies', function(assert) {
click('[data-test-confirm-button]');
andThen(function() {
assert.equal(currentURL(), `/vault/policies/acl`, 'navigates to policy list on successful deletion');
- assert.dom(`[data-test-policy-item="${policyLower}"]`).doesNotExist('deleted policy is not shown in the list');
+ assert
+ .dom(`[data-test-policy-item="${policyLower}"]`)
+ .doesNotExist('deleted policy is not shown in the list');
});
});
diff --git a/ui/tests/acceptance/settings-test.js b/ui/tests/acceptance/settings-test.js
index fba94b0c8..182d1a168 100644
--- a/ui/tests/acceptance/settings-test.js
+++ b/ui/tests/acceptance/settings-test.js
@@ -46,10 +46,6 @@ test('settings', function(assert) {
});
andThen(() => {
- assert.ok(
- currentURL(),
- '/vault/secrets/${path}/configuration',
- 'navigates to the config page'
- );
+ assert.ok(currentURL(), '/vault/secrets/${path}/configuration', 'navigates to the config page');
});
});
diff --git a/ui/tests/acceptance/ssh-test.js b/ui/tests/acceptance/ssh-test.js
index 0e9199286..9abee52b4 100644
--- a/ui/tests/acceptance/ssh-test.js
+++ b/ui/tests/acceptance/ssh-test.js
@@ -143,7 +143,9 @@ test('ssh backend', function(assert) {
click(`[data-test-confirm-button]`);
andThen(() => {
- assert.dom(`[data-test-secret-link="${role.name}"]`).doesNotExist(`${role.type}: role is no longer in the list`);
+ assert
+ .dom(`[data-test-secret-link="${role.name}"]`)
+ .doesNotExist(`${role.type}: role is no longer in the list`);
});
});
});
diff --git a/ui/tests/acceptance/tools-test.js b/ui/tests/acceptance/tools-test.js
index 7a108eb3e..e5d05d221 100644
--- a/ui/tests/acceptance/tools-test.js
+++ b/ui/tests/acceptance/tools-test.js
@@ -118,50 +118,46 @@ test('tools functionality', function(assert) {
click('[data-test-tools-b64-toggle="input"]');
click('[data-test-tools-submit]');
andThen(() => {
- assert.dom('[data-test-tools-input="sum"]').hasValue(
- 'LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564=',
- 'hashes the data, encodes input'
- );
+ assert
+ .dom('[data-test-tools-input="sum"]')
+ .hasValue('LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564=', 'hashes the data, encodes input');
});
click('[data-test-tools-back]');
fillIn('[data-test-tools-input="hash-input"]', 'e2RhdGE6ImZvbyJ9');
click('[data-test-tools-submit]');
andThen(() => {
- assert.dom('[data-test-tools-input="sum"]').hasValue(
- 'JmSi2Hhbgu2WYOrcOyTqqMdym7KT3sohCwAwaMonVrc=',
- 'hashes the data, passes b64 input through'
- );
+ assert
+ .dom('[data-test-tools-input="sum"]')
+ .hasValue('JmSi2Hhbgu2WYOrcOyTqqMdym7KT3sohCwAwaMonVrc=', 'hashes the data, passes b64 input through');
});
});
const AUTH_RESPONSE = {
- "request_id": "39802bc4-235c-2f0b-87f3-ccf38503ac3e",
- "lease_id": "",
- "renewable": false,
- "lease_duration": 0,
- "data": null,
- "wrap_info": null,
- "warnings": null,
- "auth": {
- "client_token": "ecfc2758-588e-981d-50f4-a25883bbf03c",
- "accessor": "6299780b-f2b2-1a3f-7b83-9d3d67629249",
- "policies": [
- "root"
- ],
- "metadata": null,
- "lease_duration": 0,
- "renewable": false,
- "entity_id": ""
- }
+ request_id: '39802bc4-235c-2f0b-87f3-ccf38503ac3e',
+ lease_id: '',
+ renewable: false,
+ lease_duration: 0,
+ data: null,
+ wrap_info: null,
+ warnings: null,
+ auth: {
+ client_token: 'ecfc2758-588e-981d-50f4-a25883bbf03c',
+ accessor: '6299780b-f2b2-1a3f-7b83-9d3d67629249',
+ policies: ['root'],
+ metadata: null,
+ lease_duration: 0,
+ renewable: false,
+ entity_id: '',
+ },
};
test('ensure unwrap with auth block works properly', function(assert) {
- this.server = new Pretender(function() {
- this.post('/v1/sys/wrapping/unwrap', response => {
- return [response, { 'Content-Type': 'application/json' }, JSON.stringify(AUTH_RESPONSE)];
- });
+ this.server = new Pretender(function() {
+ this.post('/v1/sys/wrapping/unwrap', response => {
+ return [response, { 'Content-Type': 'application/json' }, JSON.stringify(AUTH_RESPONSE)];
});
+ });
visit('/vault/tools');
//unwrap
click('[data-test-tools-action-link="unwrap"]');
diff --git a/ui/tests/acceptance/transit-test.js b/ui/tests/acceptance/transit-test.js
index def20a117..ca1e1110b 100644
--- a/ui/tests/acceptance/transit-test.js
+++ b/ui/tests/acceptance/transit-test.js
@@ -101,17 +101,21 @@ const testEncryption = (assert, keyName) => {
);
},
assertBeforeDecrypt: key => {
- assert.dom('[data-test-transit-input="context"]').hasValue(
- 'nqR8LiVgNh/lwO2rArJJE9F9DMhh0lKo4JX9DAAkCDw=',
- `${key}: the ui shows the base64-encoded context`
- );
+ assert
+ .dom('[data-test-transit-input="context"]')
+ .hasValue(
+ 'nqR8LiVgNh/lwO2rArJJE9F9DMhh0lKo4JX9DAAkCDw=',
+ `${key}: the ui shows the base64-encoded context`
+ );
},
assertAfterDecrypt: key => {
- assert.dom('[data-test-transit-input="plaintext"]').hasValue(
- 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=',
- `${key}: the ui shows the base64-encoded plaintext`
- );
+ assert
+ .dom('[data-test-transit-input="plaintext"]')
+ .hasValue(
+ 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=',
+ `${key}: the ui shows the base64-encoded plaintext`
+ );
},
},
// raw bytes for plaintext, string for context
@@ -128,13 +132,17 @@ const testEncryption = (assert, keyName) => {
);
},
assertBeforeDecrypt: key => {
- assert.dom('[data-test-transit-input="context"]').hasValue(encodeString('context'), `${key}: the ui shows the input context`);
+ assert
+ .dom('[data-test-transit-input="context"]')
+ .hasValue(encodeString('context'), `${key}: the ui shows the input context`);
},
assertAfterDecrypt: key => {
- assert.dom('[data-test-transit-input="plaintext"]').hasValue(
- 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=',
- `${key}: the ui shows the base64-encoded plaintext`
- );
+ assert
+ .dom('[data-test-transit-input="plaintext"]')
+ .hasValue(
+ 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=',
+ `${key}: the ui shows the base64-encoded plaintext`
+ );
},
},
// base64 input
@@ -151,10 +159,14 @@ const testEncryption = (assert, keyName) => {
);
},
assertBeforeDecrypt: key => {
- assert.dom('[data-test-transit-input="context"]').hasValue(encodeString('context'), `${key}: the ui shows the input context`);
+ assert
+ .dom('[data-test-transit-input="context"]')
+ .hasValue(encodeString('context'), `${key}: the ui shows the input context`);
},
assertAfterDecrypt: key => {
- assert.dom('[data-test-transit-input="plaintext"]').hasValue('This is the secret', `${key}: the ui decodes plaintext`);
+ assert
+ .dom('[data-test-transit-input="plaintext"]')
+ .hasValue('This is the secret', `${key}: the ui decodes plaintext`);
},
},
@@ -173,11 +185,15 @@ const testEncryption = (assert, keyName) => {
);
},
assertBeforeDecrypt: key => {
- assert.dom('[data-test-transit-input="context"]').hasValue(encodeString('secret 2'), `${key}: the ui shows the encoded context`);
+ assert
+ .dom('[data-test-transit-input="context"]')
+ .hasValue(encodeString('secret 2'), `${key}: the ui shows the encoded context`);
},
assertAfterDecrypt: key => {
assert.ok(findWithAssert('[data-test-transit-input="plaintext"]'), `${key}: plaintext box shows`);
- assert.dom('[data-test-transit-input="plaintext"]').hasValue('There are many secrets 🤐', `${key}: the ui decodes plaintext`);
+ assert
+ .dom('[data-test-transit-input="plaintext"]')
+ .hasValue('There are many secrets 🤐', `${key}: the ui decodes plaintext`);
},
},
];
@@ -229,12 +245,16 @@ test('transit backend', function(assert) {
if (index === 0) {
click('[data-test-transit-link="versions"]');
andThen(() => {
- assert.dom('[data-test-transit-key-version-row]').exists({ count: 1 }, `${key.name}: only one key version`);
+ assert
+ .dom('[data-test-transit-key-version-row]')
+ .exists({ count: 1 }, `${key.name}: only one key version`);
});
click('[data-test-transit-key-rotate] button');
click('[data-test-confirm-button]');
andThen(() => {
- assert.dom('[data-test-transit-key-version-row]').exists({ count: 2 }, `${key.name}: two key versions after rotate`);
+ assert
+ .dom('[data-test-transit-key-version-row]')
+ .exists({ count: 2 }, `${key.name}: two key versions after rotate`);
});
}
click('[data-test-transit-key-actions-link]');
@@ -256,7 +276,9 @@ test('transit backend', function(assert) {
`${key.name}: exportable key has a link to export action`
);
} else {
- assert.dom('[data-test-transit-action-link="export"]').doesNotExist(`${key.name}: non-exportable key does not link to export action`);
+ assert
+ .dom('[data-test-transit-action-link="export"]')
+ .doesNotExist(`${key.name}: non-exportable key does not link to export action`);
}
if (key.convergent && key.supportsEncryption) {
testEncryption(assert, key.name);
diff --git a/ui/tests/integration/components/identity/item-details-test.js b/ui/tests/integration/components/identity/item-details-test.js
new file mode 100644
index 000000000..4cced3a44
--- /dev/null
+++ b/ui/tests/integration/components/identity/item-details-test.js
@@ -0,0 +1,56 @@
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+import sinon from 'sinon';
+import { create } from 'ember-cli-page-object';
+import itemDetails from 'vault/tests/pages/components/identity/item-details';
+import Ember from 'ember';
+
+const component = create(itemDetails);
+const { getOwner } = Ember;
+
+moduleForComponent('identity/item-details', 'Integration | Component | identity/item details', {
+ integration: true,
+ beforeEach() {
+ component.setContext(this);
+ getOwner(this).lookup('service:flash-messages').registerTypes(['success']);
+ },
+ afterEach() {
+ component.removeContext();
+ }
+});
+
+test('it renders the disabled warning', function(assert) {
+ let model = Ember.Object.create({
+ save() {
+ return Ember.RSVP.resolve();
+ },
+ disabled: true,
+ canEdit: true
+ });
+ sinon.spy(model, 'save');
+ this.set('model', model);
+ this.render(hbs`{{identity/item-details model=model}}`);
+ assert.dom('[data-test-disabled-warning]').exists();
+ component.enable();
+
+ assert.ok(model.save.calledOnce, 'clicking enable calls model save');
+});
+
+test('it does not render the button if canEdit is false', function(assert) {
+ let model = Ember.Object.create({
+ disabled: true
+ });
+
+ this.set('model', model);
+ this.render(hbs`{{identity/item-details model=model}}`);
+ assert.dom('[data-test-disabled-warning]').exists('shows the warning banner');
+ assert.dom('[data-test-enable]').doesNotExist('does not show the enable button');
+});
+
+test('it does not render the banner when item is enabled', function(assert) {
+ let model = Ember.Object.create();
+ this.set('model', model);
+
+ this.render(hbs`{{identity/item-details model=model}}`);
+ assert.dom('[data-test-disabled-warning]').doesNotExist('does not show the warning banner');
+});
diff --git a/ui/tests/pages/access/identity/aliases/add.js b/ui/tests/pages/access/identity/aliases/add.js
new file mode 100644
index 000000000..4be991f37
--- /dev/null
+++ b/ui/tests/pages/access/identity/aliases/add.js
@@ -0,0 +1,7 @@
+import { create, visitable } from 'ember-cli-page-object';
+import editForm from 'vault/tests/pages/components/identity/edit-form';
+
+export default create({
+ visit: visitable('/vault/access/identity/:item_type/aliases/add/:id'),
+ editForm,
+});
diff --git a/ui/tests/pages/access/identity/aliases/index.js b/ui/tests/pages/access/identity/aliases/index.js
new file mode 100644
index 000000000..297dbeab8
--- /dev/null
+++ b/ui/tests/pages/access/identity/aliases/index.js
@@ -0,0 +1,13 @@
+import { create, clickable, text, visitable, collection } from 'ember-cli-page-object';
+import flashMessage from 'vault/tests/pages/components/flash-message';
+
+export default create({
+ visit: visitable('/vault/access/identity/:item_type/aliases'),
+ flashMessage,
+ items: collection('[data-test-identity-row]', {
+ menu: clickable('[data-test-popup-menu-trigger]'),
+ id: text('[data-test-identity-link]'),
+ }),
+ delete: clickable('[data-test-item-delete] [data-test-confirm-action-trigger]'),
+ confirmDelete: clickable('[data-test-item-delete] [data-test-confirm-button]'),
+});
diff --git a/ui/tests/pages/access/identity/aliases/show.js b/ui/tests/pages/access/identity/aliases/show.js
new file mode 100644
index 000000000..5ca908ad8
--- /dev/null
+++ b/ui/tests/pages/access/identity/aliases/show.js
@@ -0,0 +1,11 @@
+import { create, clickable, collection, contains, visitable } from 'ember-cli-page-object';
+import flashMessage from 'vault/tests/pages/components/flash-message';
+import infoTableRow from 'vault/tests/pages/components/info-table-row';
+
+export default create({
+ visit: visitable('/vault/access/identity/:item_type/aliases/:alias_id'),
+ flashMessage,
+ nameContains: contains('[data-test-alias-name]'),
+ rows: collection('[data-test-component="info-table-row"]', infoTableRow),
+ edit: clickable('[data-test-alias-edit-link]')
+});
diff --git a/ui/tests/pages/access/identity/create.js b/ui/tests/pages/access/identity/create.js
new file mode 100644
index 000000000..8fbaac076
--- /dev/null
+++ b/ui/tests/pages/access/identity/create.js
@@ -0,0 +1,13 @@
+import { create, visitable } from 'ember-cli-page-object';
+import editForm from 'vault/tests/pages/components/identity/edit-form';
+
+export default create({
+ visit: visitable('/vault/access/identity/:item_type/create'),
+ editForm,
+ createItem(item_type, type) {
+ if (type) {
+ return this.visit({item_type}).editForm.type(type).submit();
+ }
+ return this.visit({item_type}).editForm.submit();
+ }
+});
diff --git a/ui/tests/pages/access/identity/index.js b/ui/tests/pages/access/identity/index.js
index 96e44360b..4a79ea39f 100644
--- a/ui/tests/pages/access/identity/index.js
+++ b/ui/tests/pages/access/identity/index.js
@@ -1,4 +1,13 @@
-import { create, visitable } from 'ember-cli-page-object';
+import { create, clickable, text, visitable, collection } from 'ember-cli-page-object';
+import flashMessage from 'vault/tests/pages/components/flash-message';
+
export default create({
visit: visitable('/vault/access/identity/:item_type'),
+ flashMessage,
+ items: collection('[data-test-identity-row]', {
+ menu: clickable('[data-test-popup-menu-trigger]'),
+ id: text('[data-test-identity-link]'),
+ }),
+ delete: clickable('[data-test-item-delete] [data-test-confirm-action-trigger]'),
+ confirmDelete: clickable('[data-test-item-delete] [data-test-confirm-button]'),
});
diff --git a/ui/tests/pages/access/identity/show.js b/ui/tests/pages/access/identity/show.js
new file mode 100644
index 000000000..ffaf79da0
--- /dev/null
+++ b/ui/tests/pages/access/identity/show.js
@@ -0,0 +1,11 @@
+import { create, clickable, collection, contains, visitable } from 'ember-cli-page-object';
+import flashMessage from 'vault/tests/pages/components/flash-message';
+import infoTableRow from 'vault/tests/pages/components/info-table-row';
+
+export default create({
+ visit: visitable('/vault/access/identity/:item_type/:item_id'),
+ flashMessage,
+ nameContains: contains('[data-test-identity-item-name]'),
+ rows: collection('[data-test-component="info-table-row"]', infoTableRow),
+ edit: clickable('[data-test-entity-edit-link]')
+});
diff --git a/ui/tests/pages/components/identity/edit-form.js b/ui/tests/pages/components/identity/edit-form.js
new file mode 100644
index 000000000..a77daa8b7
--- /dev/null
+++ b/ui/tests/pages/components/identity/edit-form.js
@@ -0,0 +1,14 @@
+import { clickable, fillable, attribute } from 'ember-cli-page-object';
+import fields from '../form-field';
+
+export default {
+ ...fields,
+ cancelLinkHref: attribute('href', '[data-test-cancel-link]'),
+ cancelLink: clickable('[data-test-cancel-link]'),
+ name: fillable('[data-test-input="name"]'),
+ disabled: clickable('[data-test-input="disabled"]'),
+ type: fillable('[data-test-input="type"]'),
+ submit: clickable('[data-test-identity-submit]'),
+ delete: clickable('[data-test-confirm-action-trigger]'),
+ confirmDelete: clickable('[data-test-confirm-button]'),
+};
diff --git a/ui/tests/pages/components/identity/item-details.js b/ui/tests/pages/components/identity/item-details.js
new file mode 100644
index 000000000..9a10b66f7
--- /dev/null
+++ b/ui/tests/pages/components/identity/item-details.js
@@ -0,0 +1,5 @@
+import { clickable } from 'ember-cli-page-object';
+
+export default {
+ enable: clickable('[data-test-enable]'),
+};
diff --git a/ui/tests/pages/components/info-table-row.js b/ui/tests/pages/components/info-table-row.js
new file mode 100644
index 000000000..9a404f97a
--- /dev/null
+++ b/ui/tests/pages/components/info-table-row.js
@@ -0,0 +1,7 @@
+import { text, isPresent } from 'ember-cli-page-object';
+
+export default {
+ hasLabel: isPresent('[data-test-row-label]'),
+ rowLabel: text('[data-test-row-label]'),
+ rowValue: text('[data-test-row-value]'),
+};
diff --git a/ui/tests/unit/components/identity/edit-form-test.js b/ui/tests/unit/components/identity/edit-form-test.js
new file mode 100644
index 000000000..82aae87cd
--- /dev/null
+++ b/ui/tests/unit/components/identity/edit-form-test.js
@@ -0,0 +1,73 @@
+import { moduleForComponent, test } from 'ember-qunit';
+import sinon from 'sinon';
+import Ember from 'ember';
+
+moduleForComponent('identity/edit-form', 'Unit | Component | identity/edit-form', {
+ unit: true,
+ needs: ['service:auth', 'service:flash-messages'],
+});
+
+let testCases = [
+ {
+ identityType: 'entity',
+ mode: 'create',
+ expected: 'vault.cluster.access.identity',
+ },
+ {
+ identityType: 'entity',
+ mode: 'edit',
+ expected: 'vault.cluster.access.identity.show',
+ },
+ {
+ identityType: 'entity-merge',
+ mode: 'merge',
+ expected: 'vault.cluster.access.identity',
+ },
+ {
+ identityType: 'entity-alias',
+ mode: 'create',
+ expected: 'vault.cluster.access.identity.aliases',
+ },
+ {
+ identityType: 'entity-alias',
+ mode: 'edit',
+ expected: 'vault.cluster.access.identity.aliases.show',
+ },
+ {
+ identityType: 'group',
+ mode: 'create',
+ expected: 'vault.cluster.access.identity',
+ },
+ {
+ identityType: 'group',
+ mode: 'edit',
+ expected: 'vault.cluster.access.identity.show',
+ },
+ {
+ identityType: 'group-alias',
+ mode: 'create',
+ expected: 'vault.cluster.access.identity.aliases',
+ },
+ {
+ identityType: 'group-alias',
+ mode: 'edit',
+ expected: 'vault.cluster.access.identity.aliases.show',
+ },
+];
+testCases.forEach(function(testCase) {
+ let model = Ember.Object.create({
+ identityType: testCase.identityType,
+ rollbackAttributes: sinon.spy(),
+ });
+ test(`it computes cancelLink properly: ${testCase.identityType} ${testCase.mode}`, function(assert) {
+ let component = this.subject();
+
+ component.set('mode', testCase.mode);
+ component.set('model', model);
+ assert.equal(
+ component.get('cancelLink'),
+ testCase.expected,
+ 'cancel link is correct'
+ );
+ });
+});
diff --git a/ui/tests/unit/services/store-test.js b/ui/tests/unit/services/store-test.js
index ab2f95fa0..4476176c3 100644
--- a/ui/tests/unit/services/store-test.js
+++ b/ui/tests/unit/services/store-test.js
@@ -89,6 +89,7 @@ test('store.constructResponse', function(assert) {
});
test('store.fetchPage', function(assert) {
+ let done = assert.async(4);
const keys = ['zero', 'one', 'two', 'three', 'four', 'five', 'six'];
const data = {
data: {
@@ -106,11 +107,14 @@ test('store.fetchPage', function(assert) {
let result;
Ember.run(() => {
- result = store.fetchPage('transit-key', query);
+ store.fetchPage('transit-key', query).then(r => {
+ result = r;
+ done();
+ });
});
assert.ok(result.get('length'), pageSize, 'returns the correct number of items');
- assert.deepEqual(result.toArray().mapBy('id'), keys.slice(0, pageSize), 'returns the first page of items');
+ assert.deepEqual(result.mapBy('id'), keys.slice(0, pageSize), 'returns the first page of items');
assert.deepEqual(
result.get('meta'),
{
@@ -125,44 +129,54 @@ test('store.fetchPage', function(assert) {
);
Ember.run(() => {
- result = store.fetchPage('transit-key', {
+ store.fetchPage('transit-key', {
size: pageSize,
page: 3,
responsePath: 'data.keys',
+ }).then(r => {
+ result = r;
+ done()
});
});
const pageThreeEnd = 3 * pageSize;
const pageThreeStart = pageThreeEnd - pageSize;
assert.deepEqual(
- result.toArray().mapBy('id'),
+ result.mapBy('id'),
keys.slice(pageThreeStart, pageThreeEnd),
'returns the third page of items'
);
Ember.run(() => {
- result = store.fetchPage('transit-key', {
+ store.fetchPage('transit-key', {
size: pageSize,
page: 99,
responsePath: 'data.keys',
+ }).then(r => {
+ result = r;
+ done();
});
});
assert.deepEqual(
- result.toArray().mapBy('id'),
+ result.mapBy('id'),
keys.slice(keys.length - 1),
'returns the last page when the page value is beyond the bounds'
);
Ember.run(() => {
- result = store.fetchPage('transit-key', {
+ store.fetchPage('transit-key', {
size: pageSize,
page: 0,
responsePath: 'data.keys',
+ }).then(r => {
+ result = r;
+ done();
});
});
assert.deepEqual(
- result.toArray().mapBy('id'),
+ result.mapBy('id'),
keys.slice(0, pageSize),
'returns the first page when page value is under the bounds'
);
From 196d054f70b0da52f2a55eb898bef7f908600635 Mon Sep 17 00:00:00 2001
From: nelson
Date: Fri, 25 May 2018 00:44:44 +0800
Subject: [PATCH 26/39] Update kv-v2.html.md (#4614)
correct the payload format for "Configure the KV Engine" and "Update Metadata"
---
website/source/api/secret/kv/kv-v2.html.md | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/website/source/api/secret/kv/kv-v2.html.md b/website/source/api/secret/kv/kv-v2.html.md
index e4ae6797a..1ce682584 100644
--- a/website/source/api/secret/kv/kv-v2.html.md
+++ b/website/source/api/secret/kv/kv-v2.html.md
@@ -42,10 +42,8 @@ key-value store.
```json
{
- "data": {
- "max_versions": 5,
- "cas_required": false
- }
+ "max_versions": 5,
+ "cas_required": false
}
```
@@ -410,10 +408,8 @@ have an ACL policy granting the `update` capability.
```json
{
- "data": {
- "max_versions": 5,
- "cas_required": false
- }
+ "max_versions": 5,
+ "cas_required": false
}
```
From c7142ce061b02acb657a9fffe27b910d58c4e342 Mon Sep 17 00:00:00 2001
From: Brian Kassouf
Date: Thu, 24 May 2018 10:03:48 -0700
Subject: [PATCH 27/39] changelog++
---
CHANGELOG.md | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6ce4dfe2f..2b406d7f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -52,6 +52,11 @@ BUG FIXES:
[GH-4570]
* ui: Fix HMAC algorithm in transit [GH-4604]
* ui: Fix unwrap of auth responses via the UI's unwrap tool [GH-4611]
+ * replication: Fix error while running plugins on a newly created replication
+ secondary
+ * replication: Fix issue with token store lookups after a secondary's mount table
+ is invalidated.
+ * replication: Improve startup time when a large merkle index is in use.
## 0.10.1/0.9.7 (April 25th, 2018)
From 6a2d0e71b6eeb5d048754a5e06b6ceb9eddbe395 Mon Sep 17 00:00:00 2001
From: Yoko
Date: Thu, 24 May 2018 11:39:02 -0700
Subject: [PATCH 28/39] Vault Interactive Tutorial updates (#4623)
* Added more tutorial steps
* Updated the step texts
---
website/source/_ember_steps.html.erb | 155 ++++++++++++++++--
.../demo/initializer/load-steps.js | 14 +-
2 files changed, 155 insertions(+), 14 deletions(-)
diff --git a/website/source/_ember_steps.html.erb b/website/source/_ember_steps.html.erb
index 7b6fba5e6..aa0360537 100644
--- a/website/source/_ember_steps.html.erb
+++ b/website/source/_ember_steps.html.erb
@@ -19,12 +19,14 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/website/source/assets/javascripts/demo/initializer/load-steps.js b/website/source/assets/javascripts/demo/initializer/load-steps.js
index a7adee9e7..4c0c7d47e 100644
--- a/website/source/assets/javascripts/demo/initializer/load-steps.js
+++ b/website/source/assets/javascripts/demo/initializer/load-steps.js
@@ -11,9 +11,17 @@ Ember.Application.initializer({
{ id: 2, name: 'init', humanName: "Step 2: Initialize your Vault"},
{ id: 3, name: 'unseal', humanName: "Step 3: Unsealing your Vault"},
{ id: 4, name: 'auth', humanName: "Step 4: Authorize your requests"},
- { id: 5, name: 'secrets', humanName: "Step 6: Read and write secrets"},
- { id: 6, name: 'seal', humanName: "Step 7: Seal your Vault"},
- { id: 7, name: 'finish', humanName: "You're finished!"},
+ { id: 5, name: 'list', humanName: "Step 5: List available secret engines"},
+ { id: 6, name: 'secrets', humanName: "Step 6: Read and write secrets"},
+ { id: 7, name: 'update', humanName: "Step 7: Update the secret data"},
+ { id: 8, name: 'patch', humanName: "Step 8: Update the data without overwriting"},
+ { id: 9, name: 'versions', humanName: "Step 9: Work with different data versions"},
+ { id: 10, name: 'delete', humanName: "Step 10: Delete the data"},
+ { id: 11, name: 'recover', humanName: "Step 11: Recover the deleted data"},
+ { id: 12, name: 'destroy', humanName: "Step 12: Permanently delete data"},
+ { id: 13, name: 'help', humanName: "Step 13: Get Help"},
+ { id: 14, name: 'seal', humanName: "Step 14: Seal your Vault"},
+ { id: 15, name: 'finish', humanName: "You're finished!"},
]
};
From abc0975d754a55e2f10c23f8e5b6f01b6e91b193 Mon Sep 17 00:00:00 2001
From: Becca Petrin
Date: Thu, 24 May 2018 13:57:25 -0700
Subject: [PATCH 29/39] fix tests (#4636)
---
command/format.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/command/format.go b/command/format.go
index 4af09e402..c4c55e00e 100644
--- a/command/format.go
+++ b/command/format.go
@@ -75,7 +75,6 @@ func Format(ui cli.Ui) string {
return ui.(*VaultUI).format
}
- panic("")
format := os.Getenv(EnvVaultFormat)
if format == "" {
format = "table"
From 17460461a0388d582ddd6a186d921b4d934c9b35 Mon Sep 17 00:00:00 2001
From: Nicholas Jackson
Date: Fri, 25 May 2018 16:34:46 +0200
Subject: [PATCH 30/39] Breakout parameters for x.509 certificate login (#4463)
---
builtin/credential/cert/backend_test.go | 209 ++++++++++++++----
builtin/credential/cert/path_certs.go | 61 ++++-
builtin/credential/cert/path_login.go | 80 +++++++
.../cert/test-fixtures/root/rootcacert.srl | 2 +-
.../cert/test-fixtures/root/rootcawdns.cnf | 17 ++
.../cert/test-fixtures/root/rootcawdns.csr | 27 +++
.../test-fixtures/root/rootcawdnscert.pem | 23 ++
.../cert/test-fixtures/root/rootcawdnskey.pem | 52 +++++
.../cert/test-fixtures/root/rootcawemail.cnf | 17 ++
.../cert/test-fixtures/root/rootcawemail.csr | 27 +++
.../test-fixtures/root/rootcawemailcert.pem | 23 ++
.../test-fixtures/root/rootcawemailkey.pem | 52 +++++
.../cert/test-fixtures/root/rootcawext.cnf | 5 -
.../cert/test-fixtures/root/rootcawuri.cnf | 18 ++
.../cert/test-fixtures/root/rootcawuri.csr | 17 ++
.../test-fixtures/root/rootcawuricert.pem | 18 ++
.../cert/test-fixtures/root/rootcawurikey.pem | 28 +++
website/source/api/auth/cert/index.html.md | 23 ++
18 files changed, 646 insertions(+), 53 deletions(-)
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawdns.cnf
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawdns.csr
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawdnscert.pem
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawdnskey.pem
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawemail.cnf
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawemail.csr
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawemailcert.pem
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawemailkey.pem
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawuri.cnf
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawuri.csr
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawuricert.pem
create mode 100644 builtin/credential/cert/test-fixtures/root/rootcawurikey.pem
diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go
index 8942e7366..8b62ec075 100644
--- a/builtin/credential/cert/backend_test.go
+++ b/builtin/credential/cert/backend_test.go
@@ -843,9 +843,9 @@ func TestBackend_CertWrites(t *testing.T) {
tc := logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
- testAccStepCert(t, "aaa", ca1, "foo", "", "", false),
- testAccStepCert(t, "bbb", ca2, "foo", "", "", false),
- testAccStepCert(t, "ccc", ca3, "foo", "", "", true),
+ testAccStepCert(t, "aaa", ca1, "foo", allowed{}, false),
+ testAccStepCert(t, "bbb", ca2, "foo", allowed{}, false),
+ testAccStepCert(t, "ccc", ca3, "foo", allowed{}, true),
},
}
tc.Steps = append(tc.Steps, testAccStepListCerts(t, []string{"aaa", "bbb"})...)
@@ -866,7 +866,7 @@ func TestBackend_basic_CA(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
- testAccStepCert(t, "web", ca, "foo", "", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{}, false),
testAccStepLogin(t, connState),
testAccStepCertLease(t, "web", ca, "foo"),
testAccStepCertTTL(t, "web", ca, "foo"),
@@ -875,9 +875,9 @@ func TestBackend_basic_CA(t *testing.T) {
testAccStepLogin(t, connState),
testAccStepCertNoLease(t, "web", ca, "foo"),
testAccStepLoginDefaultLease(t, connState),
- testAccStepCert(t, "web", ca, "foo", "*.example.com", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "*.example.com"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "*.invalid.com", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "*.invalid.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
@@ -926,20 +926,45 @@ func TestBackend_basic_singleCert(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
- testAccStepCert(t, "web", ca, "foo", "", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "1.2.3.4:invalid", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false),
testAccStepLoginInvalid(t, connState),
},
})
}
-// Test a self-signed client with custom extensions (root CA) that is trusted
-func TestBackend_extensions_singleCert(t *testing.T) {
+func TestBackend_common_name_singleCert(t *testing.T) {
+ connState, err := testConnState("test-fixtures/root/rootcacert.pem",
+ "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", allowed{}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{common_names: "example.com"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{common_names: "invalid"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test a self-signed client with custom ext (root CA) that is trusted
+func TestBackend_ext_singleCert(t *testing.T) {
connState, err := testConnState(
"test-fixtures/root/rootcawextcert.pem",
"test-fixtures/root/rootcawextkey.pem",
@@ -955,39 +980,132 @@ func TestBackend_extensions_singleCert(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:A UTF8String Extension", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:*,2.1.1.2:A UTF8*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "1.2.3.45:*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:*,2.1.1.2:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "", "2.1.1.1:,2.1.1.2:*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:,2.1.1.2:*"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "2.1.1.1:A UTF8String Extension", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "2.1.1.1:*,2.1.1.2:A UTF8*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "1.2.3.45:*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "2.1.1.1:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", "2.1.1.1:*,2.1.1.2:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "2.1.1.1:A UTF8String Extension", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:A UTF8String Extension"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "2.1.1.1:*,2.1.1.2:A UTF8*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "1.2.3.45:*", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "1.2.3.45:*"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "2.1.1.1:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:The Wrong Value"}, false),
testAccStepLoginInvalid(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", "2.1.1.1:*,2.1.1.2:The Wrong Value", false),
+ testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test a self-signed client with DNS alt names (root CA) that is trusted
+func TestBackend_dns_singleCert(t *testing.T) {
+ connState, err := testConnState(
+ "test-fixtures/root/rootcawdnscert.pem",
+ "test-fixtures/root/rootcawdnskey.pem",
+ "test-fixtures/root/rootcacert.pem",
+ )
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "*ample.com"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "notincert.com"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "abc"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "*.example.com"}, false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test a self-signed client with email alt names (root CA) that is trusted
+func TestBackend_email_singleCert(t *testing.T) {
+ connState, err := testConnState(
+ "test-fixtures/root/rootcawemailcert.pem",
+ "test-fixtures/root/rootcawemailkey.pem",
+ "test-fixtures/root/rootcacert.pem",
+ )
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", allowed{emails: "valid@example.com"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{emails: "*@example.com"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{emails: "invalid@notincert.com"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{emails: "abc"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{emails: "*.example.com"}, false),
+ testAccStepLoginInvalid(t, connState),
+ },
+ })
+}
+
+// Test a self-signed client with URI alt names (root CA) that is trusted
+func TestBackend_uri_singleCert(t *testing.T) {
+ connState, err := testConnState(
+ "test-fixtures/root/rootcawuricert.pem",
+ "test-fixtures/root/rootcawurikey.pem",
+ "test-fixtures/root/rootcacert.pem",
+ )
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
+ ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/*"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/host"}, false),
+ testAccStepLogin(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/invalid"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{uris: "abc"}, false),
+ testAccStepLoginInvalid(t, connState),
+ testAccStepCert(t, "web", ca, "foo", allowed{uris: "http://www.google.com"}, false),
testAccStepLoginInvalid(t, connState),
},
})
@@ -1007,9 +1125,9 @@ func TestBackend_mixed_constraints(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
- testAccStepCert(t, "1unconstrained", ca, "foo", "", "", false),
- testAccStepCert(t, "2matching", ca, "foo", "*.example.com,whatever", "", false),
- testAccStepCert(t, "3invalid", ca, "foo", "invalid", "", false),
+ testAccStepCert(t, "1unconstrained", ca, "foo", allowed{}, false),
+ testAccStepCert(t, "2matching", ca, "foo", allowed{names: "*.example.com,whatever"}, false),
+ testAccStepCert(t, "3invalid", ca, "foo", allowed{names: "invalid"}, false),
testAccStepLogin(t, connState),
// Assumes CertEntries are processed in alphabetical order (due to store.List), so we only match 2matching if 1unconstrained doesn't match
testAccStepLoginWithName(t, connState, "2matching"),
@@ -1314,19 +1432,32 @@ func testAccStepListCerts(
}
}
+type allowed struct {
+	names        string // allowed names in the certificate; checked against the common name and the DNS/email SANs [deprecated]
+	common_names string // allowed common names in the certificate
+	dns          string // allowed DNS names in the SAN extension of the certificate
+	emails       string // allowed email addresses in the SAN extension of the certificate
+	uris         string // allowed URIs in the SAN extension of the certificate
+ ext string // required extensions in the certificate
+}
+
func testAccStepCert(
- t *testing.T, name string, cert []byte, policies string, allowedNames string, requiredExtensions string, expectError bool) logicaltest.TestStep {
+ t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "certs/" + name,
ErrorOk: expectError,
Data: map[string]interface{}{
- "certificate": string(cert),
- "policies": policies,
- "display_name": name,
- "allowed_names": allowedNames,
- "required_extensions": requiredExtensions,
- "lease": 1000,
+ "certificate": string(cert),
+ "policies": policies,
+ "display_name": name,
+ "allowed_names": testData.names,
+ "allowed_common_names": testData.common_names,
+ "allowed_dns_sans": testData.dns,
+ "allowed_email_sans": testData.emails,
+ "allowed_uri_sans": testData.uris,
+ "required_extensions": testData.ext,
+ "lease": 1000,
},
Check: func(resp *logical.Response) error {
if resp == nil && expectError {
diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go
index 96c93ac56..384cd47fd 100644
--- a/builtin/credential/cert/path_certs.go
+++ b/builtin/credential/cert/path_certs.go
@@ -45,7 +45,33 @@ Must be x509 PEM encoded.`,
"allowed_names": &framework.FieldSchema{
Type: framework.TypeCommaStringSlice,
Description: `A comma-separated list of names.
-At least one must exist in either the Common Name or SANs. Supports globbing.`,
+At least one must exist in either the Common Name or SANs. Supports globbing.
+This parameter is deprecated; please use allowed_common_names, allowed_dns_sans,
+allowed_email_sans, or allowed_uri_sans.
+ },
+
+ "allowed_common_names": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `A comma-separated list of names.
+At least one must exist in the Common Name. Supports globbing.`,
+ },
+
+ "allowed_dns_sans": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `A comma-separated list of DNS names.
+At least one must exist in the SANs. Supports globbing.`,
+ },
+
+ "allowed_email_sans": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `A comma-separated list of Email Addresses.
+At least one must exist in the SANs. Supports globbing.`,
+ },
+
+ "allowed_uri_sans": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `A comma-separated list of URIs.
+At least one must exist in the SANs. Supports globbing.`,
},
"required_extensions": &framework.FieldSchema{
@@ -77,12 +103,14 @@ seconds. Defaults to system/backend default TTL.`,
Description: `TTL for tokens issued by this backend.
Defaults to system/backend default TTL time.`,
},
+
"max_ttl": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Description: `Duration in either an integer number of seconds (3600) or
an integer time unit (60m) after which the
issued token can no longer be renewed.`,
},
+
"period": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Description: `If set, indicates that the token generated using this role
@@ -151,13 +179,18 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra
return &logical.Response{
Data: map[string]interface{}{
- "certificate": cert.Certificate,
- "display_name": cert.DisplayName,
- "policies": cert.Policies,
- "ttl": cert.TTL / time.Second,
- "max_ttl": cert.MaxTTL / time.Second,
- "period": cert.Period / time.Second,
- "allowed_names": cert.AllowedNames,
+ "certificate": cert.Certificate,
+ "display_name": cert.DisplayName,
+ "policies": cert.Policies,
+ "ttl": cert.TTL / time.Second,
+ "max_ttl": cert.MaxTTL / time.Second,
+ "period": cert.Period / time.Second,
+ "allowed_names": cert.AllowedNames,
+ "allowed_common_names": cert.AllowedCommonNames,
+ "allowed_dns_sans": cert.AllowedDNSSANs,
+ "allowed_email_sans": cert.AllowedEmailSANs,
+ "allowed_uri_sans": cert.AllowedURISANs,
+ "required_extensions": cert.RequiredExtensions,
},
}, nil
}
@@ -168,6 +201,10 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr
displayName := d.Get("display_name").(string)
policies := policyutil.ParsePolicies(d.Get("policies"))
allowedNames := d.Get("allowed_names").([]string)
+ allowedCommonNames := d.Get("allowed_common_names").([]string)
+ allowedDNSSANs := d.Get("allowed_dns_sans").([]string)
+ allowedEmailSANs := d.Get("allowed_email_sans").([]string)
+ allowedURISANs := d.Get("allowed_uri_sans").([]string)
requiredExtensions := d.Get("required_extensions").([]string)
var resp logical.Response
@@ -246,6 +283,10 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr
DisplayName: displayName,
Policies: policies,
AllowedNames: allowedNames,
+ AllowedCommonNames: allowedCommonNames,
+ AllowedDNSSANs: allowedDNSSANs,
+ AllowedEmailSANs: allowedEmailSANs,
+ AllowedURISANs: allowedURISANs,
RequiredExtensions: requiredExtensions,
TTL: ttl,
MaxTTL: maxTTL,
@@ -278,6 +319,10 @@ type CertEntry struct {
MaxTTL time.Duration
Period time.Duration
AllowedNames []string
+ AllowedCommonNames []string
+ AllowedDNSSANs []string
+ AllowedEmailSANs []string
+ AllowedURISANs []string
RequiredExtensions []string
BoundCIDRs []*sockaddr.SockAddrMarshaler
}
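
Taken together, the new fields let a role constrain each SAN type independently. As a rough usage sketch via the Vault Go API client (the role name "web", the "foo" policy, and the caPEM placeholder are illustrative; the parameter names come from the field schema above):

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set and the cert auth
	// method is mounted at the default path "cert".
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// caPEM stands in for the PEM-encoded CA certificate.
	caPEM := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"

	// Write a role exercising the SAN-specific constraints added above.
	_, err = client.Logical().Write("auth/cert/certs/web", map[string]interface{}{
		"certificate":        caPEM,
		"policies":           "foo",
		"allowed_dns_sans":   "*.example.com",
		"allowed_email_sans": "*@example.com",
		"allowed_uri_sans":   "spiffe://example.com/*",
	})
	if err != nil {
		log.Fatal(err)
	}
}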
diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go
index cf1c6c68f..a5a3d00d9 100644
--- a/builtin/credential/cert/path_login.go
+++ b/builtin/credential/cert/path_login.go
@@ -103,6 +103,7 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra
Renewable: true,
TTL: matched.Entry.TTL,
MaxTTL: matched.Entry.MaxTTL,
+ Period: matched.Entry.Period,
},
Alias: &logical.Alias{
Name: clientCerts[0].Subject.CommonName,
@@ -253,6 +254,10 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d
func (b *backend) matchesConstraints(clientCert *x509.Certificate, trustedChain []*x509.Certificate, config *ParsedCert) bool {
return !b.checkForChainInCRLs(trustedChain) &&
b.matchesNames(clientCert, config) &&
+ b.matchesCommonName(clientCert, config) &&
+ b.matchesDNSSANs(clientCert, config) &&
+ b.matchesEmailSANs(clientCert, config) &&
+ b.matchesURISANs(clientCert, config) &&
b.matchesCertificateExtensions(clientCert, config)
}
@@ -280,10 +285,85 @@ func (b *backend) matchesNames(clientCert *x509.Certificate, config *ParsedCert)
return true
}
}
+
}
return false
}
+// matchesCommonName verifies that the certificate matches at least one configured
+// allowed common name
+func (b *backend) matchesCommonName(clientCert *x509.Certificate, config *ParsedCert) bool {
+ // Default behavior (no names) is to allow all names
+ if len(config.Entry.AllowedCommonNames) == 0 {
+ return true
+ }
+ // At least one pattern must match at least one name if any patterns are specified
+ for _, allowedCommonName := range config.Entry.AllowedCommonNames {
+ if glob.Glob(allowedCommonName, clientCert.Subject.CommonName) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// matchesDNSSANs verifies that the certificate matches at least one configured
+// allowed DNS entry in the subject alternative name extension
+func (b *backend) matchesDNSSANs(clientCert *x509.Certificate, config *ParsedCert) bool {
+ // Default behavior (no names) is to allow all names
+ if len(config.Entry.AllowedDNSSANs) == 0 {
+ return true
+ }
+ // At least one pattern must match at least one name if any patterns are specified
+ for _, allowedDNS := range config.Entry.AllowedDNSSANs {
+ for _, name := range clientCert.DNSNames {
+ if glob.Glob(allowedDNS, name) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// matchesEmailSANs verifies that the certificate matches at least one configured
+// allowed email in the subject alternative name extension
+func (b *backend) matchesEmailSANs(clientCert *x509.Certificate, config *ParsedCert) bool {
+ // Default behavior (no names) is to allow all names
+ if len(config.Entry.AllowedEmailSANs) == 0 {
+ return true
+ }
+ // At least one pattern must match at least one name if any patterns are specified
+ for _, allowedEmail := range config.Entry.AllowedEmailSANs {
+ for _, email := range clientCert.EmailAddresses {
+ if glob.Glob(allowedEmail, email) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// matchesURISANs verifies that the certificate matches at least one configured
+// allowed URI in the subject alternative name extension
+func (b *backend) matchesURISANs(clientCert *x509.Certificate, config *ParsedCert) bool {
+ // Default behavior (no names) is to allow all names
+ if len(config.Entry.AllowedURISANs) == 0 {
+ return true
+ }
+ // At least one pattern must match at least one name if any patterns are specified
+ for _, allowedURI := range config.Entry.AllowedURISANs {
+ for _, name := range clientCert.URIs {
+ if glob.Glob(allowedURI, name.String()) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
// matchesCertificateExtensions verifies that the certificate matches configured
// required extensions
func (b *backend) matchesCertificateExtensions(clientCert *x509.Certificate, config *ParsedCert) bool {
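
All four new matchers share one shape: an empty pattern list imposes no constraint, and otherwise at least one configured glob pattern must match at least one value taken from the certificate. A minimal distillation of that shared logic, assuming the same go-glob dependency (matchAny is an illustrative name, not a helper in this patch):

package cert

import "github.com/ryanuber/go-glob"

// matchAny reports whether at least one pattern glob-matches at least
// one value; an empty pattern list means "no constraint configured".
func matchAny(patterns, values []string) bool {
	if len(patterns) == 0 {
		return true
	}
	for _, p := range patterns {
		for _, v := range values {
			if glob.Glob(p, v) {
				return true
			}
		}
	}
	return false
}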
diff --git a/builtin/credential/cert/test-fixtures/root/rootcacert.srl b/builtin/credential/cert/test-fixtures/root/rootcacert.srl
index 219a6be4b..1c85d6318 100644
--- a/builtin/credential/cert/test-fixtures/root/rootcacert.srl
+++ b/builtin/credential/cert/test-fixtures/root/rootcacert.srl
@@ -1 +1 @@
-92223EAFBBEE17A3
+92223EAFBBEE17AF
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawdns.cnf b/builtin/credential/cert/test-fixtures/root/rootcawdns.cnf
new file mode 100644
index 000000000..3c576a95c
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawdns.cnf
@@ -0,0 +1,17 @@
+[ req ]
+default_bits = 2048
+encrypt_key = no
+prompt = no
+default_md = sha256
+req_extensions = req_v3
+distinguished_name = dn
+
+[ dn ]
+CN = example.com
+
+[ req_v3 ]
+subjectAltName = @alt_names
+
+[ alt_names ]
+IP.1 = 127.0.0.1
+DNS.1 = example.com
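
The new fixtures appear to have been generated by feeding configs like this one to openssl. As a hedged Go equivalent of what rootcawdns.cnf requests (a sketch only, not how the checked-in fixtures were produced): a self-signed certificate with CN example.com plus the IP and DNS SANs:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // default_bits = 2048
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.com"}, // CN = example.com
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1")}, // IP.1
		DNSNames:     []string{"example.com"},            // DNS.1
	}
	// Self-signed: the template doubles as its own parent.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}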
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawdns.csr b/builtin/credential/cert/test-fixtures/root/rootcawdns.csr
new file mode 100644
index 000000000..b56d32314
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawdns.csr
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIEijCCAnICAQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQDUJ6s97BFxR295bCjpwQ85Vo8DnBFa/awNH107
+QFn/zw0ZDdJMLtEBc/bw7pTYw5ulKbiZDFrmzPEY+QZlo+t1TeWgPRJg0CbYNukS
+aNv0vKXjDXYwbrCyOvZucy8hte6IKjZfH+kAsgbbUxfD75BCKsxMxbVHkg0W9Ma2
+pnZj/kpvQE5lkMj5mDvtWdfCRsVg4zL6jhRHkPZ6fOkF3mrfTbQu3oyOcbKLEE/G
+t3QRKw3uv0vMDmhg62ZPvD1k70UMjUV2MVqEPZuWY7/bbW8OsfzMyBOGY9LLp7QS
+krxWYRj6SPUR4f1bZq7pRbqOfS0okq/XDLf1k6Na5cT6iNdyjEVdSJl7vR7kSreX
+8hkwK46Oup8v/vJLu/cRDCpAas0gJJkJDPt5114V0/Xww7EFxs5GijXP8i5RLlgK
+/nRscbK+fgjQOnQ5cp0pcP8HAriy2vil7E0fQvMvt5QTyINEYgiYaCIT9WGRC8Xo
+WcoUGI2vyrGy6RU6A3/TKeBLtikaSPjFKa1dFTAHfrUkTBpfqc+sbiJ334Bvucg5
+WyS8oAC5Vf++iMnETSdzx1k0/QARVLD38PO8wPaPU1M2XaSA+RHTB9SGFc4VTauT
+B167NLlmgJHYuhp+KM1RTy1TEoDlJh2qKj21BLcR1GJ0KgDze6Vpf9xdRTdqMpo2
+h20wdQIDAQABoC8wLQYJKoZIhvcNAQkOMSAwHjAcBgNVHREEFTAThwR/AAABggtl
+eGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEAGtds+2IUVKzw9bi130mBbb9K
+CrXw2NXSp+LJwneIEy0bjAaNr7zCGQsj7q57qFwjc7vLTtGRheP5myrAOq00lp8J
+1sGZSETS/y4yeITLZSYWVq2dtF/hY9I+X3uOoibdsQgzYqhBcUr4oTDapf1ZEs0i
+wA2J5IcasfaBpWFc9wRN79BBACLGyCbX6VwUISrGe3Hgzkeqtg97cU62ecQsgXiZ
+LdQgERvC0wSfAmI4lGulXi1oYYSRxXQ8pEKEAMeJrJVQfvhdbS/o4Bdf3Yj6ibtD
+tFSdKLcdRCfMQBEHNpSh665LfBbwU55Fh89tBdGmf6uqsimUY6AxNncnLsc1Kq6F
+oINXix3GsBNmCahDeHdGOlNjw0Lpl0m6bnu6LXSDwwuNWAEdDfEmxR+5T/GkGxcG
+TTWPwEkpnCe4VmGl9Y10uPSvqneNsdNWjDVK4BeW4VSf9Lp1Zeme1dYFvpyzow+r
+4ogpvMPf5vy5I/0HCEf1KlaPyhs8ZGK6YBGaeEDYSaysAWJfYm8eiqwUuKYj/FUe
+G3KkaFpOGsQHFNRtG8GukV3r2AK97HFHKNfygZ2xvk5isXz2ZsNX1/J0+GGjalJl
+cWBBEiXFM94XJHE9rACsL2UKn8cWCh9lHNLlePOkQuoNY9CUd63xx4Hg97XWP3+U
+DhpG7CADsKcPJfbMgrk=
+-----END CERTIFICATE REQUEST-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawdnscert.pem b/builtin/credential/cert/test-fixtures/root/rootcawdnscert.pem
new file mode 100644
index 000000000..2ce633e3a
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawdnscert.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIIDzzCCAregAwIBAgIJAJIiPq+77herMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
+BAMTC2V4YW1wbGUuY29tMB4XDTE4MDQyNjEyMDEzMFoXDTE5MDQyNjEyMDEzMFow
+FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDUJ6s97BFxR295bCjpwQ85Vo8DnBFa/awNH107QFn/zw0ZDdJMLtEB
+c/bw7pTYw5ulKbiZDFrmzPEY+QZlo+t1TeWgPRJg0CbYNukSaNv0vKXjDXYwbrCy
+OvZucy8hte6IKjZfH+kAsgbbUxfD75BCKsxMxbVHkg0W9Ma2pnZj/kpvQE5lkMj5
+mDvtWdfCRsVg4zL6jhRHkPZ6fOkF3mrfTbQu3oyOcbKLEE/Gt3QRKw3uv0vMDmhg
+62ZPvD1k70UMjUV2MVqEPZuWY7/bbW8OsfzMyBOGY9LLp7QSkrxWYRj6SPUR4f1b
+Zq7pRbqOfS0okq/XDLf1k6Na5cT6iNdyjEVdSJl7vR7kSreX8hkwK46Oup8v/vJL
+u/cRDCpAas0gJJkJDPt5114V0/Xww7EFxs5GijXP8i5RLlgK/nRscbK+fgjQOnQ5
+cp0pcP8HAriy2vil7E0fQvMvt5QTyINEYgiYaCIT9WGRC8XoWcoUGI2vyrGy6RU6
+A3/TKeBLtikaSPjFKa1dFTAHfrUkTBpfqc+sbiJ334Bvucg5WyS8oAC5Vf++iMnE
+TSdzx1k0/QARVLD38PO8wPaPU1M2XaSA+RHTB9SGFc4VTauTB167NLlmgJHYuhp+
+KM1RTy1TEoDlJh2qKj21BLcR1GJ0KgDze6Vpf9xdRTdqMpo2h20wdQIDAQABoyAw
+HjAcBgNVHREEFTAThwR/AAABggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQUFAAOC
+AQEA2JswcCYtHvOm2QmSEVeFcCeVNkzr35FXATamJv0oMMjjUFix78MW03EW6vJa
+E52e3pBvRdy+k2fuq/RtHIUKzB6jNbv0Vds26Dq+pmGeoaQZOW94/Wht7f9pZgBi
+IRPBg9oACtyNAuDsCOdetOyvyoU29sjUOUoQZbEXF+FK4lRJrEmZUJHbp/BOD58V
+mQRtjTMjQlZZropqBQmooMRYU0qgWHaIjyoQpu2MgEj3+/1b1IX6SCfRuit0auh/
+YI3/cCtyAG/DpZ6zfyXuyY+iN+l8B6t0nXyV3g8JgBWYPGJv1hgVIgnnqlwuL517
+mEAT5RnHCNJQNuzS1dwfuBrX3w==
+-----END CERTIFICATE-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawdnskey.pem b/builtin/credential/cert/test-fixtures/root/rootcawdnskey.pem
new file mode 100644
index 000000000..15db567e0
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawdnskey.pem
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDUJ6s97BFxR295
+bCjpwQ85Vo8DnBFa/awNH107QFn/zw0ZDdJMLtEBc/bw7pTYw5ulKbiZDFrmzPEY
++QZlo+t1TeWgPRJg0CbYNukSaNv0vKXjDXYwbrCyOvZucy8hte6IKjZfH+kAsgbb
+UxfD75BCKsxMxbVHkg0W9Ma2pnZj/kpvQE5lkMj5mDvtWdfCRsVg4zL6jhRHkPZ6
+fOkF3mrfTbQu3oyOcbKLEE/Gt3QRKw3uv0vMDmhg62ZPvD1k70UMjUV2MVqEPZuW
+Y7/bbW8OsfzMyBOGY9LLp7QSkrxWYRj6SPUR4f1bZq7pRbqOfS0okq/XDLf1k6Na
+5cT6iNdyjEVdSJl7vR7kSreX8hkwK46Oup8v/vJLu/cRDCpAas0gJJkJDPt5114V
+0/Xww7EFxs5GijXP8i5RLlgK/nRscbK+fgjQOnQ5cp0pcP8HAriy2vil7E0fQvMv
+t5QTyINEYgiYaCIT9WGRC8XoWcoUGI2vyrGy6RU6A3/TKeBLtikaSPjFKa1dFTAH
+frUkTBpfqc+sbiJ334Bvucg5WyS8oAC5Vf++iMnETSdzx1k0/QARVLD38PO8wPaP
+U1M2XaSA+RHTB9SGFc4VTauTB167NLlmgJHYuhp+KM1RTy1TEoDlJh2qKj21BLcR
+1GJ0KgDze6Vpf9xdRTdqMpo2h20wdQIDAQABAoICAQDJxszUQQC554I7TsZ+xBJx
+q0Sr3zSWgOuxM2Jdpy+x38AKUx3vPRulsSBtN8yzeR9Ab7TVQ231U3f/E2GlK8kW
+sTazN0KSd4ZqX5c+3iJM21s+3p/JIo3FhdS5aa2q9zjdoqBByry135wr3xScUu22
+MLRMVEG8x0jRy45vS1UQd1teAiBN8u1ijgp5DNjrOpohMxVaPeVFx7bU+pY58bdd
+mK7FYP73v2VbY/EsA3FNntBKgQBbHFzjyR9uuI7/v53BeV9WMUxwt5OR7l8cGDHn
+HRtdvPDtAWYMMf1PKOYdlY3HBbqn/nMUCk5TKPFs8dsQWqsI8lzIIVndauj0i0+0
+M/lVMXu4x48o5FfLa4HjkpcDxAU6QDHA9thaDkasZebixVH/p1ZJkLORl5jDLYkU
+Av+B3i1efITwNYgosZNjPpw0PyYh9PV9JvB87d5wFpgISfZyRXpBVGeJbt6gg++8
+8/5A/GzSpGy0FhLcP3vuVTcX2VOexjqeaoi4U3cHrbWv/wNj5a4BNk5EJT8fVeSb
++Emqydl9u3n2E315GPC8kwxdE3r3hGrWdZQn9byGvqzwDaLWXQLQWvQN4GOpGTrP
+Yxj2Oi8s1MJHkppj4eo52O4J7cBlAJn3RFmlCKGOoWJZMdPktp/gWeT+xIGSaa21
+qB+l/ZFEWLPMxdTBMGFmYQKCAQEA8DgozaZBXr7mb1H25EbW9YmtLc61JMapQrZb
+ObygiGR6RZsxCXEvGdhvvmwpO8rA9wAOWqI8NV5GU8EvuRuwvGoX4HqbXkB6ZcyC
+6RuZzki2lrKVGUaLc1v6MyhX4IzrqTYWDgQvwd9lMcUGR7r007KPE5ft4v3/TuxQ
+qPKxQE7NO2xnTloUchd5g0/d975GZi0g6XDecFOuj43Pi0c/wRcFH6zfVirdcm+M
+yP9CsJ/LUgtV1voLqyhfybwHvzpxJ0l25Fw+P85I4czosBp+FaFAwogxZEDnY8Fr
+Hqcwdc7vwGDjTbtflDsUdppt2h8nD8bBZGysG8+P8HAt3i5D+QKCAQEA4heKueRQ
+Y8nTZlmRSRtB6usRAeymQBJjO+yWwu/06Efg8VW5QRwtP0sx+syrLaQDy8MT07II
+XQZmq55xATWbHCxULiceIY2KG5LHCovVotYAll8ov58exJva19C7/41uVrkl3H9j
+xFLX0Bn3zMFKBOxKhygP2xqqEJdb1JJt27c2CbXvXOzqIZ4RCaNQdBdrlEiXQihR
+JCGMUBfrYIwALQFzYuPGULhg77YcAi5owCPnfK+dDOOvMmW8BwPnRUc14WFIVV+m
+dbY22WonLNPP055W5755Xl9RHKW1bcmIH6E4QZpMrlnd1UzPBQq1PJtcO3uRc5T6
+CMQSUmwMGSQ3XQKCAQBiuVHborY+8AnYOkFTc+GoK5rmtosvwA2UA0neoqz/IPw3
+Wx5+GOwYnSDfi6gukJdZa8Z6bS59aG9SwJSSaNTrulZxxTHRPIKRD8nFb7h4VN3l
+dSNdreZl1KkxGSV0fbXkZvwNap8N+HeoSqbYF/fCgSHYFZqIrYadsvU7WfKK0Vf7
+UgPq6Y55jTg9RTeeN67LE0Txa5efZmTZTpi7Tt7exk0uxWdMDHXSMBIWEQIhgKqY
+31u57C2bfA5R5FrytlwGn2SjWV2j7214jzQaG+kxjoIE8OALqbjvAHC7uk5qPE/A
+KpGAQr93Ngik7baz7BWroC2eziK1k0o+sHvJUg5RAoIBABF+ftZ5axr9j+T4gzxj
+5orV24AJnqeQhKsrWFMHHC0o+qfR2T7HflzKZbihQ5GJgl2u34be3LTN/P3Eibvt
+OO5KI81aa4NvH0OY7NvNDB/IbU01WcLR/iB6asmONi3E9MezFdHk7YRQYLCSgdEP
+F7ofyniAyhFLE+OqwolFN0jr+TtxH29SSZ+GSo0zXNNOyJ01rLaKxhSEoAXGhAj5
+bD4PQa1iMIMocR+7OJmWm7ZaUNwd/onzyCefJZhpXejHZMzmqSEqAIhVLBNQmm1m
+iks2kkTmQR/jQjR0QgCXunewEtlIpixLedW6Vr5uIK3q240it5N48IvjGAPWpmz/
+l2UCggEBALRlARlBdYcPhWbcyhq9suq2PJqHVvNdP9vVTZpPgmWgxmj9mPQ731Q/
+UpRlBIk6U0qAxBXP9kzPqSivWhY8Jto80Jdr+/80PkdANDkpnXoDlxPk095sD2uN
+Jv5FffFgMZH9MGpPTEuZ571/YtVi+1qFt0i3oazpF/g8gU23f2oxaX4xzsltVl8J
+rWXYzmYE0i5Qiy81+zZ9dZlnmlKhcYpD6m2t/0hRAoNaoxOUV7WFcIzYIxpKvzYL
+QTDL/Se2Ooc0xLQvM1oZ9/1NE2hpGQ/ipASEPlx9KO5ktYW7+LwdcSCMXtx84I/D
+VQpWjPdILMpiVrB/9NsENTNv2DUvc+o=
+-----END PRIVATE KEY-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawemail.cnf b/builtin/credential/cert/test-fixtures/root/rootcawemail.cnf
new file mode 100644
index 000000000..f679fb987
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawemail.cnf
@@ -0,0 +1,17 @@
+[ req ]
+default_bits = 2048
+encrypt_key = no
+prompt = no
+default_md = sha256
+distinguished_name = dn
+req_extensions = req_v3
+
+[ req_v3 ]
+subjectAltName = @alt_names
+
+[ dn ]
+CN = example.com
+
+[ alt_names ]
+IP.1 = 127.0.0.1
+email = valid@example.com
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawemail.csr b/builtin/credential/cert/test-fixtures/root/rootcawemail.csr
new file mode 100644
index 000000000..44b191495
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawemail.csr
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIEkDCCAngCAQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQDO7stcdJQwUtVeJriwpAswDAirO827peSlgdxs
+fW8X9M2hE9ihvESEILb7TRMRiFlDyQYg1BxKMrJ0DZmixFi8RvUCZbH6TFOUMsk+
+w1FhpzjuqAqxNQ51s7u30sfruJg7XN3YJLEPelom62wvzhvLXJFLQZlQCDrMx+PC
+ofWs4IA7jR8JaXZjIGdkEU0GgRPy8zKPUe3dUBHi2UR4eKT4cRCn4IwCrFx4BQjV
+AxNKNDGpe+fTVOzII/UX+FppDdGZZ4g0y3E1mQUEKkff4dKCK7vhlGJR9D+5v/V0
+/stwP72aXczijuVtnXXyli+oj24NaijoqQluNCD3MvV/INovLL2Tyk54H3/GvpU1
++sJbpE2+UPh+Rh8DNkT6RPRguymJO8MSsdLt/qvVD8BlZ7I9V3XZlDKosCRTUyxf
+jjFpa+VzB3nt7uFtIXZ9HNGhQIpOULvkFGizWV+tS8PpGdTFVzDjyWg0HUKWn8g8
+IiWR9S40h6mHjVuTuxA9tlO69PuTjGK7MlAvFTDaPC8seau1LUiqtQ+prnSLI0h1
+6GfI9W2G7BKKVPloErODhLcsOcwRcmaJVW+yBda3te8+cMBIvtQYKAYSCtg8qXws
+xyfPLo4GChbGGRbRCuM3mB1lG1qHEivJ0vynsgolp0t8jaXSFVBVgYj+C6Vd9/hl
+ieUcOwIDAQABoDUwMwYJKoZIhvcNAQkOMSYwJDAiBgNVHREEGzAZhwR/AAABgRF2
+YWxpZEBleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEAe1u3wYMMKaZ3d5Wz
+jKH971CF3sl+KYl+oV0ekD8dbYe+szERgajn6Y5IYVxxi5o9UgeAWqnyHCiqsW8T
+MQdUwMa67Ym/pHKSVoBlGePAKHqock0+iSsVBMcPpU9RkxdSW2aVtdb0DGfyB952
+t3dSb0LaITu30fe8p7lxrL0DKESbwd4N2XQE1F5Vf+1OodvpJinn4Wqzn45hqRf0
+imxrCgVjT5VtR+NRzKCK3Msmh+cJGpR3zgXwGKqgHLWzhvSoQwRWYE3apMK5xLk7
+N1sWVxEKK5+L/CDaMNGQFx5lPiCN3bUudCq4uSZcPLO5AuDpSeLKnknBrWA6HcbB
+WvnwtKmHeVe2qogPViKGuwE16rnPlp9hysPl2ckmtqEsXRagIAh5fMI3OoRbZmVV
+jfJm21U4YkUWuMKet3EU1StT6T8T6O7QEFA4w4s5+m3dsjDZ9iTuK9/dCs1xnIke
+4uJYmh3YrNl8IjMffJuWxA+/de3JO1UljC2EAFxa5KAc24+qyeWwky4tMv72gTOp
+6q3k2wnsrK5B1errRV37OLgxtoh1I3Rgp+0B77SOK/PpD/JJazJG5O9bBJOvHJc0
+STW9Td2CzgC2lKGfvkX6UYgVy/9HDq7/EKXP/G2f3kRik2NPUhGcnAH9nyL9SvpP
++T4CZ+FumDj5DulARk6arSq+uy4=
+-----END CERTIFICATE REQUEST-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawemailcert.pem b/builtin/credential/cert/test-fixtures/root/rootcawemailcert.pem
new file mode 100644
index 000000000..f774a7182
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawemailcert.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID1TCCAr2gAwIBAgIJAJIiPq+77hevMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
+BAMTC2V4YW1wbGUuY29tMB4XDTE4MDQyNjEyMjE1MloXDTE5MDQyNjEyMjE1Mlow
+FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDO7stcdJQwUtVeJriwpAswDAirO827peSlgdxsfW8X9M2hE9ihvESE
+ILb7TRMRiFlDyQYg1BxKMrJ0DZmixFi8RvUCZbH6TFOUMsk+w1FhpzjuqAqxNQ51
+s7u30sfruJg7XN3YJLEPelom62wvzhvLXJFLQZlQCDrMx+PCofWs4IA7jR8JaXZj
+IGdkEU0GgRPy8zKPUe3dUBHi2UR4eKT4cRCn4IwCrFx4BQjVAxNKNDGpe+fTVOzI
+I/UX+FppDdGZZ4g0y3E1mQUEKkff4dKCK7vhlGJR9D+5v/V0/stwP72aXczijuVt
+nXXyli+oj24NaijoqQluNCD3MvV/INovLL2Tyk54H3/GvpU1+sJbpE2+UPh+Rh8D
+NkT6RPRguymJO8MSsdLt/qvVD8BlZ7I9V3XZlDKosCRTUyxfjjFpa+VzB3nt7uFt
+IXZ9HNGhQIpOULvkFGizWV+tS8PpGdTFVzDjyWg0HUKWn8g8IiWR9S40h6mHjVuT
+uxA9tlO69PuTjGK7MlAvFTDaPC8seau1LUiqtQ+prnSLI0h16GfI9W2G7BKKVPlo
+ErODhLcsOcwRcmaJVW+yBda3te8+cMBIvtQYKAYSCtg8qXwsxyfPLo4GChbGGRbR
+CuM3mB1lG1qHEivJ0vynsgolp0t8jaXSFVBVgYj+C6Vd9/hlieUcOwIDAQABoyYw
+JDAiBgNVHREEGzAZhwR/AAABgRF2YWxpZEBleGFtcGxlLmNvbTANBgkqhkiG9w0B
+AQUFAAOCAQEAp2T99t93hxPyCDaqfTF0lsdzIgxZ5GkSzYTYQ2pekLfMDUUy4WFQ
+AppdnSJSpm6b+xWO2DkO8UAgOdSEORf/Qpfm+UpHaEYZlQiWQ0zNmIQgBoh6indU
+bEZKeL6aAOfIshPNfmqjFt+DpEClrQvCHJggG/rB77Ujj6hPY2+8h4JjbjeX7Pe9
+oUEx9LpZ5Qpo6PK5vB537PP7Q2qp2PIr29DLz1VeLCbqUnV+j7qT0T3hhqurnpTA
+QUiRZI0etgeP/B5lw/S4AWijq+R6RasdPAS4UNHsYK+PSGiqdhW/bJvSx5UBXQbk
+DuY2A4kdv60H5Aw45/F6enH2Fg1kg7PlQA==
+-----END CERTIFICATE-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawemailkey.pem b/builtin/credential/cert/test-fixtures/root/rootcawemailkey.pem
new file mode 100644
index 000000000..13b165782
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawemailkey.pem
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDO7stcdJQwUtVe
+JriwpAswDAirO827peSlgdxsfW8X9M2hE9ihvESEILb7TRMRiFlDyQYg1BxKMrJ0
+DZmixFi8RvUCZbH6TFOUMsk+w1FhpzjuqAqxNQ51s7u30sfruJg7XN3YJLEPelom
+62wvzhvLXJFLQZlQCDrMx+PCofWs4IA7jR8JaXZjIGdkEU0GgRPy8zKPUe3dUBHi
+2UR4eKT4cRCn4IwCrFx4BQjVAxNKNDGpe+fTVOzII/UX+FppDdGZZ4g0y3E1mQUE
+Kkff4dKCK7vhlGJR9D+5v/V0/stwP72aXczijuVtnXXyli+oj24NaijoqQluNCD3
+MvV/INovLL2Tyk54H3/GvpU1+sJbpE2+UPh+Rh8DNkT6RPRguymJO8MSsdLt/qvV
+D8BlZ7I9V3XZlDKosCRTUyxfjjFpa+VzB3nt7uFtIXZ9HNGhQIpOULvkFGizWV+t
+S8PpGdTFVzDjyWg0HUKWn8g8IiWR9S40h6mHjVuTuxA9tlO69PuTjGK7MlAvFTDa
+PC8seau1LUiqtQ+prnSLI0h16GfI9W2G7BKKVPloErODhLcsOcwRcmaJVW+yBda3
+te8+cMBIvtQYKAYSCtg8qXwsxyfPLo4GChbGGRbRCuM3mB1lG1qHEivJ0vynsgol
+p0t8jaXSFVBVgYj+C6Vd9/hlieUcOwIDAQABAoICAQDFitqh6TxqITlFBwv6vK9d
+b696371XrFdo1F57RwcdxHnkklCUnWh/BcgIgJx6eUJV3nq2LibPgjQva6hF5NCc
+89QDNNfBjMmgyRaqjsSKx5sm4U5Lus2R+UFzi4mEcpUI3m99XhGVKAUV8Fo4DLcl
+3LlrMTVNXH3dbdj0va4NGcfwkZiWYJI+sPliYs24LtK/dADJJro/MqfQef7OTsWV
+0kHHMSoXhzlC7fNvfd8VUFw0Ym99pC3iJclc155feWyk2FwDok7xjqFmR4KTrD1M
+PLm/7+ooOFX5WdHVnULSZlb3HSJxCV7l1JJ7QXo/nKS/s59X875n8OWjdoc7lD4T
+Xw/K9CzJCyhJ/HDhTAea1+MNTig4a6wjdSim6vasig/Gkot6jjS2lhnZae8ZhYxP
+GUx4JcPthHppgHt8s6Jb2PHuqNVRmVB0x41c5mmXOnJcSqOX0XhbSbeS1TUV8BiC
+HMaa+agt7RpQOb5uxpb+Hath/88tsjDXI0ZHNAG43ndkHxSQQ9P/q/m5uaLwuJyo
+Yb06yUy/g7ceXpJFjGsjO+33DmamvligqOswgg+oazMFo8S9ZUJw6sSXhM/XiHla
+JOj+Vatfj0ViVcaGlO2kWughuCT5thn92bgC9V2VnJhbaSzSaQlRphlbuSYJEYj0
+S1uIbwPzTrcBQuekwY50YQKCAQEA/vve5K/nAnw4KLSrKwwCp9trYSm8C5czv2jV
+tn6vQtckQMrw/hubX7TcTTgTuGboGdHMwZMFJBKpx6AlRHCR5IBw8fR1z+58c+2V
+VJgllc23eKwCcBMKoe6LmsiUXOWmc7MuHc+qQS+9OemO93nNafsSwFCkucBFQs/3
+Yx7J3zNvMOuy+dq3jrxO0xl2jBF0pcmJF/czrvbMCD7tvDntgqvpAnybgrwm2cu3
+Q5F6i+E5w6VDhCprQL/aK95iT7cPmfdGxsUCdfNzDGIJFHZp2Hrar1TsOP6ESsDl
+Q/Oz9oO1vMy7MymJjWFoVELBlCBxDEgubyM1f8cE1tQ6UAqFSwKCAQEAz8HnKWPe
+NWZtqdAzSmY+3ZxSe1BbukOo4XtCV8LfRHGazKpXMTqsO9l7ynK7ifXv3b3GHTr+
+ck2Af/vyiVx6f7Ty2dmBotFQDzg0HfKD2skAPyH8cHpA8TUeL3yMOR3XQU5/pOnG
+tn84n7KWpAyZXh8gzMnmzWjMlb9pUlkKcATUj0gb8iSa9PV0zBwMKYKY0ngznJT2
+CgE1vhy59rpuUVMrQ8i5iW9jbqYVrqID+ta2DWgcLsEXft7jKfupnRHF0Dvc650p
++Lkxv0YgKjUg5sYc2QJbIiBxXaW0cTRrw/KfOe4kvdG5RMF60Six+W1DIW2l+qi3
+irnDRvRm1N6e0QKCAQEA86d5MaxJIl3TSEqEeikK7J3GuV0pHSZKQ7EI70+VaFiv
+gt6qdReqXEU2cu+QIJjtV6bcc2lq8zKGXITSt9ieAO0fgIWqgpyQ/jJcjS6qU8D1
+fnFYDwKTGXQaoTjkVPT6HvtsqP4E4i+dMZbWj/MrcAeEvpMRJZLuXE7gRi5ol0nO
+CcBhEVKILvQQmrZtSqFvhvDTeTw2fg3FoGeJw2DTbheaHE84RzBGK774C7Abm0kI
+asUkhEoInSH3eA4UgbobRXQ+hLhDhrSxDncr2ArjUALtr7eF11yWy9wR+OIK6Rio
+9JXqmJQrphcbm9ECq+poPGVJQdgySjzCigrZAh1biwKCAQBiBnFVXCOaOov/lZa9
+weRjl8BrIo1FI2tpiEjTM8U4fAm4C588QRzG2GTKLrxB6eKVU1dIr28i62J4AJ59
+JT8/RldXZoL+GZiWtcQRZT3FWxVctGJxh51gsdleOnvG70eDLtCXNR5nOTu0TgU5
+viAXAsTtG05lGM9+0GOXUR/VntHUEQfuhkr+zVmgfJNYeqA0njZr6PT134BGBTPR
+MEGg6Yb+YpT4PbBCouaUESmjju8zAC5b+Qtm9y9jvbRXwez9xWEFYpBNJMROJX5D
+q/GsMUmnMq9hOMGEmAy9ZSh7udxa7vwy++NYh5m1Wmgu8di8ywmHbVe8gs2aivKB
++dAhAoIBAQC7nSuRSmRGeKJCAqikMFPVdFdLaTnEw4i/fcyYsPa+o2hTWXyLll8K
+lwSnBxe+BCvdeQ8cWzg3rPaIBVzUjDecZjdwjcHnhlHvgHjEvFX339rvpD7J2HIb
+DaVqvtPniCFdNK4Jyvd3JMtNq34SIHFAcmB9358JuKsOwCmk8CpMAqKPVsKj7m6H
+ETISh/K8aI2vZxVZ4WN4FsQTCqmtQDXFSGpZF5EZSpMJIB3ZZLt2jyyDW2DaZ+1T
+yuVl9jU56fTtacQROQY7cvrwznX0lFpmniwl0Aj0wln/svFAqKo1+RujqApw5iYn
+ssH1dH2tESx6RpMMyLYihjHVDC/ULUVu
+-----END PRIVATE KEY-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawext.cnf b/builtin/credential/cert/test-fixtures/root/rootcawext.cnf
index 524efd2e4..77e8258e1 100644
--- a/builtin/credential/cert/test-fixtures/root/rootcawext.cnf
+++ b/builtin/credential/cert/test-fixtures/root/rootcawext.cnf
@@ -10,12 +10,7 @@ distinguished_name = dn
CN = example.com
[ req_v3 ]
-subjectAltName = @alt_names
2.1.1.1=ASN1:UTF8String:A UTF8String Extension
2.1.1.2=ASN1:UTF8:A UTF8 Extension
2.1.1.3=ASN1:IA5:An IA5 Extension
2.1.1.4=ASN1:VISIBLE:A Visible Extension
-
-[ alt_names ]
-DNS.1 = example.com
-IP.1 = 127.0.0.1
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawuri.cnf b/builtin/credential/cert/test-fixtures/root/rootcawuri.cnf
new file mode 100644
index 000000000..bb15540ca
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawuri.cnf
@@ -0,0 +1,18 @@
+[ req ]
+default_bits = 2048
+encrypt_key = no
+prompt = no
+default_md = sha256
+req_extensions = req_v3
+distinguished_name = dn
+
+[ dn ]
+CN = example.com
+
+[ req_v3 ]
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = example.com
+IP.1 = 127.0.0.1
+URI.1 = spiffe://example.com/host
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawuri.csr b/builtin/credential/cert/test-fixtures/root/rootcawuri.csr
new file mode 100644
index 000000000..0ababe1ce
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawuri.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICpTCCAY0CAQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDEtoz6THzA8RFNJ+wu40Pa30Inyprv3xRGYA71
+0T3yLrWUA0xaS8i7HHXDaEVmtHi7I+dFRqGwCgtDLY3sXN1C1t/U6V6xhhQ1hRW7
+PJhbGfsfi8uBx83amWiSMlmEBYPryQzPS+8mmRErBi6EdmgbdGWV5IcovMddDxE1
+Npc1vwmTxDUOe6mRSa8UkaR9nwFl8LTz9clIkGlOJLHWD2oX15PVr7SKYco+MrIh
+HLKkYMgATFJ05EKLyRxO/lQWD6ibUYJuGhFeNyjk34swl3uoWQBGndxcs2BQP4OL
+EfnsoXVDrHwjZ1FWSu/Bf6TfKvwo5It1IZLnm+cCTqxCnaLRAgMBAAGgSjBIBgkq
+hkiG9w0BCQ4xOzA5MDcGA1UdEQQwMC6CC2V4YW1wbGUuY29thwR/AAABhhlzcGlm
+ZmU6Ly9leGFtcGxlLmNvbS9ob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBw2y7bPrLk
+B7DrZRvO/s8yj/Mi2iS/q3KEACEUxHTXH9GrqnQJ1n00WjaEu5JgXW8F08738nj/
+QhO5IM9ZMBtFyt9/GguZzGWnGUGUvtfM/ps/qzF6lAnjxYnFfqJeDWhg4SQsW6ZW
+eFZ3S1kx0iQjy+Y7oWZNObbgDhszdJa6swN1WJBB8BZuiDJYXMBzfWdR6aZStJ0Z
+lUHyaQbILXRc+meuDY7KeILJhldlE8oU/NENO1w1WXcsseXg8790pPYg+uR/uXg0
+0iWPtqgjO+55eAvkZ5nY0N/kABV1oaCB8bVs6/2HPqquPX6c+xkcUI/HY8SJgWzk
+AHCG7VIB4W94
+-----END CERTIFICATE REQUEST-----
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawuricert.pem b/builtin/credential/cert/test-fixtures/root/rootcawuricert.pem
new file mode 100644
index 000000000..171f4de00
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawuricert.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC6jCCAdKgAwIBAgIJAJIiPq+77hekMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
+BAMTC2V4YW1wbGUuY29tMB4XDTE4MDMzMTE2MTE0NVoXDTE5MDMzMTE2MTE0NVow
+FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDEtoz6THzA8RFNJ+wu40Pa30Inyprv3xRGYA710T3yLrWUA0xaS8i7
+HHXDaEVmtHi7I+dFRqGwCgtDLY3sXN1C1t/U6V6xhhQ1hRW7PJhbGfsfi8uBx83a
+mWiSMlmEBYPryQzPS+8mmRErBi6EdmgbdGWV5IcovMddDxE1Npc1vwmTxDUOe6mR
+Sa8UkaR9nwFl8LTz9clIkGlOJLHWD2oX15PVr7SKYco+MrIhHLKkYMgATFJ05EKL
+yRxO/lQWD6ibUYJuGhFeNyjk34swl3uoWQBGndxcs2BQP4OLEfnsoXVDrHwjZ1FW
+Su/Bf6TfKvwo5It1IZLnm+cCTqxCnaLRAgMBAAGjOzA5MDcGA1UdEQQwMC6CC2V4
+YW1wbGUuY29thwR/AAABhhlzcGlmZmU6Ly9leGFtcGxlLmNvbS9ob3N0MA0GCSqG
+SIb3DQEBBQUAA4IBAQDhR59hSpL4k4wbK3bA17YoNwFBsDpDcoU2iB9NDUTj+j+T
+Rgumt+VHtgxuGRDFPQ+0D2hmJJHNCHKulgeDKVLtY/c5dCEsk8epLQwoCd/pQsNR
+Lj102g83rCrU0pfTFjAUoecmHBFt7GDxVyWDsJgGItMatPQuWyZXTzO8JdhCfpMP
+m7z65VYZjIPgevpSR5NVJDU8u2jRCkRQBFqOXotJS6EObu4P8aly4YhwiMf1B0L8
+60XHbBksOQSZOky37uFhaab78bAu5nd2kN1K4qSObTJshCZAwRYk0XdCjDrMcZRJ
+Fp+yygib+K8e7o71Co0zUdSU0yxOKGsWvjz1BUVl
+-----END CERTIFICATE-----
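
A quick way to confirm what TestBackend_uri_singleCert relies on, namely that this fixture carries the spiffe://example.com/host URI SAN, is to parse it with crypto/x509; the URIs field (added in Go 1.10) is the same one the new matcher reads. A sanity-check sketch, run from the package directory:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	raw, err := ioutil.ReadFile("test-fixtures/root/rootcawuricert.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// Expect: spiffe://example.com/host
	for _, u := range cert.URIs {
		fmt.Println(u.String())
	}
}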
diff --git a/builtin/credential/cert/test-fixtures/root/rootcawurikey.pem b/builtin/credential/cert/test-fixtures/root/rootcawurikey.pem
new file mode 100644
index 000000000..81ac97829
--- /dev/null
+++ b/builtin/credential/cert/test-fixtures/root/rootcawurikey.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDEtoz6THzA8RFN
+J+wu40Pa30Inyprv3xRGYA710T3yLrWUA0xaS8i7HHXDaEVmtHi7I+dFRqGwCgtD
+LY3sXN1C1t/U6V6xhhQ1hRW7PJhbGfsfi8uBx83amWiSMlmEBYPryQzPS+8mmREr
+Bi6EdmgbdGWV5IcovMddDxE1Npc1vwmTxDUOe6mRSa8UkaR9nwFl8LTz9clIkGlO
+JLHWD2oX15PVr7SKYco+MrIhHLKkYMgATFJ05EKLyRxO/lQWD6ibUYJuGhFeNyjk
+34swl3uoWQBGndxcs2BQP4OLEfnsoXVDrHwjZ1FWSu/Bf6TfKvwo5It1IZLnm+cC
+TqxCnaLRAgMBAAECggEAYLdYbR/6HmroFMVSLGN000H9ps7IirNlpoxIDrhH+rDY
+eeN9QNAN62E8zUyRAsQsr+YhKUBm8sSdcPQO2W13JAu9lVMAScwgV4gNfTd3uSL3
+AzWaYz63iYjvjyHOPUjw6Za6A5nUBWgwtrSdXmdRHF6IK8Bma7MVWj20OjOS+MsM
+ScXk+yMTzpQYZ+AhP6rgcccn6djtk+Mqrpa7yW5cTDkQ0+/MF0KR7tYUbakRSimI
+Ph6e+zFt4infOWP5fDr0oSpMXA2chh0INTtxbltnJzvaaPF8LSzyihWTZszABc84
+Ckgrvmt5DViYbmfKHk0csS/xF/wdygfkkJHML8l/IQKBgQD9CMaDgfpM78uH8Kgm
+Ja/ANu4Te5zO/n5E96PHdvCN+m7pCMgYuXuKgXDADgD1O6MItzDnEpkubluffARf
+1eJyw9ner0tTAs8bZgtKdLQvaghq5Afk1+m8XDTskJsVLVGrozvJLuabPqnZrkRH
+AxLdZjiAh6z2csFVYTQnMQSfhQKBgQDHBMjapcDx9y/jUq/yoFvwkdT3THQO9JgK
+XC5NOHGVhyT3695wpqi/ANA4b8P9MmAzcUkT8a3jcqV87OIQmK3Y1oGvjHQCKS60
+OYE9TadpxwW2uzxS5T7YegXf5L3uHinoWHlLklN+Q9pvJStw4QrDzhd8rtcZA+FN
+KBmjzYdJ3QKBgQDYutl97qi7mXEVgPYlpoYA94u4OFq5mZYB8LLhuGiW03iINbNe
+KhE9M12lwtjjNC+S2YYThgSaln/3/LuqcoLBlitY54B3G6LVbvQg1BE5w3JuS97P
+Dnjvk3LpZXrQCr83altdGMUBGA1XnEJzKJjR9ipTPOLTPLuIK/gF0aCKGQKBgQCm
+ZFitfZGge4M9Mt/KIcpciwCcNf5+ln8bglBv3XYRhykgYsLaOmyxLLPpy3/4DAsk
+V1263//7PtofZUnoiE4pEcbhh7NiLx5OLhngsDD9Hhmn2kkoIWR2xyZsN6mYEP4G
+tRnMVi2aTo6tCE2WlYBTjtZSNze9QWI4CQPO0MKAvQKBgQCzpJAJXl04zQv9S5uW
+pH3xShmd0Zjv9tNyOVNqWUeg47IFzNC2w/6FqYkhd9C4DCAibzPx7WkVjYAR+ivY
+NQv1usVhV3maJX5rw+C4Zck8kAmiqMbLacUVdy/5E2Mbk7xqjAvu+qrMFdSk/2GR
+raR1xOEvE0cKWIwr8c8wIva4wA==
+-----END PRIVATE KEY-----
diff --git a/website/source/api/auth/cert/index.html.md b/website/source/api/auth/cert/index.html.md
index a3d2f505f..a189997b8 100644
--- a/website/source/api/auth/cert/index.html.md
+++ b/website/source/api/auth/cert/index.html.md
@@ -34,6 +34,29 @@ Sets a CA cert and associated parameters in a role name.
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
a comma-separated list of patterns. Authentication requires at least one Name
matching at least one pattern. If not set, defaults to allowing all names.
+  Note: This parameter is deprecated; please use the individual parameters
+  allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans,
+  and required_extensions.
+- `allowed_common_names` `(string: "")` - Constrain the Common Names in
+ the client certificate with a [globbed pattern]
+ (https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
+ a comma-separated list of patterns. Authentication requires at least one Name
+ matching at least one pattern. If not set, defaults to allowing all names.
+- `allowed_dns_sans` `(string: "")` - Constrain the Alternative Names in
+ the client certificate with a [globbed pattern]
+ (https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
+  a comma-separated list of patterns. Authentication requires at least one DNS
+  name matching at least one pattern. If not set, defaults to allowing all DNS names.
+- `allowed_email_sans` `(string: "")` - Constrain the Alternative Names in
+ the client certificate with a [globbed pattern]
+ (https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
+ a comma-separated list of patterns. Authentication requires at least one Email
+ matching at least one pattern. If not set, defaults to allowing all emails.
+- `allowed_uri_sans` `(string: "")` - Constrain the Alternative Names in
+ the client certificate with a [globbed pattern]
+ (https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
+ a comma-separated list of URI patterns. Authentication requires at least one URI
+ matching at least one pattern. If not set, defaults to allowing all URIs.
- `required_extensions` `(string: "" or array:[])` - Require specific Custom
Extension OIDs to exist and match the pattern. Value is a comma separated
string or array of `oid:value`. Expects the extension value to be some type
From 52cb8234a66f048536f95da1f7864f0e497e1eb4 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Fri, 25 May 2018 10:39:23 -0400
Subject: [PATCH 31/39] Changelogify and fix some minor website bits
---
CHANGELOG.md | 2 +
website/source/api/auth/cert/index.html.md | 43 +++++++++++-----------
2 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2b406d7f7..fac6031aa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,8 @@ IMPROVEMENTS:
* api: Close renewer's doneCh when the renewer is stopped, so that programs
expecting a final value through doneCh behave correctly [GH-4472]
+ * auth/cert: Break out `allowed_names` into component parts and add
+ `allowed_uri_sans` [GH-4231]
* cli: `vault login` now supports a `-no-print` flag to suppress printing
token information but still allow storing into the token helper [GH-4454]
* core/pkcs11 (enterprise): Add support for CKM_AES_CBS_PAD, CKM_RSA_PKCS, and
diff --git a/website/source/api/auth/cert/index.html.md b/website/source/api/auth/cert/index.html.md
index a189997b8..655a89521 100644
--- a/website/source/api/auth/cert/index.html.md
+++ b/website/source/api/auth/cert/index.html.md
@@ -29,39 +29,38 @@ Sets a CA cert and associated parameters in a role name.
- `name` `(string: )` - The name of the certificate role.
- `certificate` `(string: )` - The PEM-format CA certificate.
-- `allowed_names` `(string: "")` - Constrain the Common and Alternative Names in
- the client certificate with a [globbed pattern]
+- `allowed_names` `(string: "")` - DEPRECATED: Please use the individual
+ `allowed_X_sans` parameters instead. Constrain the Common and Alternative
+ Names in the client certificate with a [globbed pattern]
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
a comma-separated list of patterns. Authentication requires at least one Name
matching at least one pattern. If not set, defaults to allowing all names.
-  Note: This parameter is deprecated; please use the individual parameters
-  allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans,
-  and required_extensions.
-- `allowed_common_names` `(string: "")` - Constrain the Common Names in
- the client certificate with a [globbed pattern]
+- `allowed_common_names` `(string: "" or array: [])` - Constrain the Common
+ Names in the client certificate with a [globbed pattern]
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
a comma-separated list of patterns. Authentication requires at least one Name
matching at least one pattern. If not set, defaults to allowing all names.
-- `allowed_dns_sans` `(string: "")` - Constrain the Alternative Names in
- the client certificate with a [globbed pattern]
+- `allowed_dns_sans` `(string: "" or array: [])` - Constrain the Alternative
+ Names in the client certificate with a [globbed pattern]
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
a comma-separated list of patterns. Authentication requires at least one DNS
name matching at least one pattern. If not set, defaults to allowing all DNS names.
-- `allowed_email_sans` `(string: "")` - Constrain the Alternative Names in
- the client certificate with a [globbed pattern]
+- `allowed_email_sans` `(string: "" or array: [])` - Constrain the Alternative
+ Names in the client certificate with a [globbed pattern]
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
- a comma-separated list of patterns. Authentication requires at least one Email
- matching at least one pattern. If not set, defaults to allowing all emails.
-- `allowed_uri_sans` `(string: "")` - Constrain the Alternative Names in
- the client certificate with a [globbed pattern]
+ a comma-separated list of patterns. Authentication requires at least one
+ Email matching at least one pattern. If not set, defaults to allowing all
+ emails.
+- `allowed_uri_sans` `(string: "" or array: [])` - Constrain the Alternative
+ Names in the client certificate with a [globbed pattern]
(https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is
- a comma-separated list of URI patterns. Authentication requires at least one URI
- matching at least one pattern. If not set, defaults to allowing all URIs.
-- `required_extensions` `(string: "" or array:[])` - Require specific Custom
- Extension OIDs to exist and match the pattern. Value is a comma separated
- string or array of `oid:value`. Expects the extension value to be some type
- of ASN1 encoded string. All conditions _must_ be met. Supports globbing on
- `value`.
+ a comma-separated list of URI patterns. Authentication requires at least one
+ URI matching at least one pattern. If not set, defaults to allowing all URIs.
+- `required_extensions` `(string: "" or array: [])` - Require specific Custom
+ Extension OIDs to exist and match the pattern. Value is a comma separated
+ string or array of `oid:value`. Expects the extension value to be some type
+ of ASN1 encoded string. All conditions _must_ be met. Supports globbing on
+ `value`.
- `policies` `(string: "")` - A comma-separated list of policies to set on
tokens issued when authenticating against this CA certificate.
- `display_name` `(string: "")` - The `display_name` to set on tokens issued
From 3e95305efa4d319d2d95d56bbe2a0917725b186d Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Fri, 25 May 2018 11:54:36 -0400
Subject: [PATCH 32/39] Fix mistaken extra Period value
---
builtin/credential/cert/path_login.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go
index a5a3d00d9..767b6fcbe 100644
--- a/builtin/credential/cert/path_login.go
+++ b/builtin/credential/cert/path_login.go
@@ -103,7 +103,6 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra
Renewable: true,
TTL: matched.Entry.TTL,
MaxTTL: matched.Entry.MaxTTL,
- Period: matched.Entry.Period,
},
Alias: &logical.Alias{
Name: clientCerts[0].Subject.CommonName,
From d8533758227efc309ecf3b3d5821d5575acd3f36 Mon Sep 17 00:00:00 2001
From: Chris Hoffman
Date: Fri, 25 May 2018 12:52:49 -0400
Subject: [PATCH 33/39] changelog++
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fac6031aa..08450ba9e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,7 +25,7 @@ IMPROVEMENTS:
`allowed_uri_sans` [GH-4231]
* cli: `vault login` now supports a `-no-print` flag to suppress printing
token information but still allow storing into the token helper [GH-4454]
- * core/pkcs11 (enterprise): Add support for CKM_AES_CBS_PAD, CKM_RSA_PKCS, and
+ * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and
CKM_RSA_PKCS_OAEP mechanisms
* core/pkcs11 (enterprise): HSM slots can now be selected by token label instead
of just slot number
From fa372dc1ae2322a914f52d032e02b9fc0955cd32 Mon Sep 17 00:00:00 2001
From: Chris Hoffman
Date: Fri, 25 May 2018 12:53:26 -0400
Subject: [PATCH 34/39] changelog++
---
CHANGELOG.md | 1 -
1 file changed, 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 08450ba9e..a0e9880a3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,7 +29,6 @@ IMPROVEMENTS:
CKM_RSA_PKCS_OAEP mechanisms
* core/pkcs11 (enterprise): HSM slots can now be selected by token label instead
of just slot number
- * core/seal (enterprise):
* expiration: Allow revoke-prefix and revoke-force to work on single leases as
well as prefixes [GH-4450]
From a259c16c8b804badc044ddee94a25ee5c40e8f40 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 25 May 2018 19:39:35 +0200
Subject: [PATCH 35/39] Typo in Visibility + Add accepted values in Description
(#4638)
---
vault/logical_system.go | 6 +++---
vault/mount.go | 40 ++++++++++++++++++++--------------------
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/vault/logical_system.go b/vault/logical_system.go
index 663576b74..2662615bf 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -2092,7 +2092,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string,
if rawVal, ok := data.GetOk("listing_visibility"); ok {
lvString := rawVal.(string)
- listingVisibility := ListingVisiblityType(lvString)
+ listingVisibility := ListingVisibilityType(lvString)
if err := checkListingVisibility(listingVisibility); err != nil {
return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", listingVisibility)), nil
@@ -3744,7 +3744,7 @@ func sanitizeMountPath(path string) string {
return path
}
-func checkListingVisibility(visibility ListingVisiblityType) error {
+func checkListingVisibility(visibility ListingVisibilityType) error {
switch visibility {
case ListingVisibilityHidden:
case ListingVisibilityUnauth:
@@ -4341,7 +4341,7 @@ This path responds to the following HTTP methods.
"This function can be used to generate high-entropy random bytes.",
},
"listing_visibility": {
- "Determines the visibility of the mount in the UI-specific listing endpoint.",
+		"Determines the visibility of the mount in the UI-specific listing endpoint. Accepted values are 'unauth' and ''.",
"",
},
"passthrough_request_headers": {
diff --git a/vault/mount.go b/vault/mount.go
index 115a33cfe..7a2c70b90 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -41,14 +41,14 @@ const (
mountTableType = "mounts"
)
-// ListingVisiblityType represents the types for listing visilibity
-type ListingVisiblityType string
+// ListingVisibilityType represents the types for listing visibility
+type ListingVisibilityType string
const (
// ListingVisibilityHidden is the hidden type for listing visibility
- ListingVisibilityHidden ListingVisiblityType = ""
+ ListingVisibilityHidden ListingVisibilityType = ""
// ListingVisibilityUnauth is the unauth type for listing visibility
- ListingVisibilityUnauth ListingVisiblityType = "unauth"
+ ListingVisibilityUnauth ListingVisibilityType = "unauth"
)
var (
@@ -194,26 +194,26 @@ type MountEntry struct {
// MountConfig is used to hold settable options
type MountConfig struct {
- DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
- MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
- AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
- AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
- ListingVisibility ListingVisiblityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
- PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
+ DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
+ MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
}
// APIMountConfig is an embedded struct of api.MountConfigInput
type APIMountConfig struct {
- DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
- AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
- AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
- ListingVisibility ListingVisiblityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
- PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
+ DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
}
// Clone returns a deep copy of the mount entry
From 835b355489f2ed333556a01c5d0f4615873758f7 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Fri, 25 May 2018 14:34:24 -0400
Subject: [PATCH 36/39] Add key information to list endpoints in identity.
(#4634)
* Add key information to list endpoints in identity.
Also fixes some bugs from before where we were persisting data that we
should not have been (mount type/path).
* Add cached lookups of real time mount info
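A minimal standalone sketch of the per-request memoization this patch repeats in each list handler below: mount type and path are resolved once per accessor and reused for every alias, entity, or group that shares it. `lookupMount` and `mountInfo` here are hypothetical stand-ins for `i.core.router.validateMountByAccessor` and the local struct each handler declares.

```go
package main

import "fmt"

type mountInfo struct {
	MountType string
	MountPath string
}

// lookupMount is a hypothetical stand-in for the router call; nil means
// the accessor no longer resolves to a live mount.
func lookupMount(accessor string) *mountInfo {
	if accessor == "auth_github_1234" {
		return &mountInfo{MountType: "github", MountPath: "auth/github/"}
	}
	return nil
}

func main() {
	cache := map[string]mountInfo{} // scoped to a single list request
	for _, accessor := range []string{"auth_github_1234", "auth_github_1234", "auth_gone_9999"} {
		mi, ok := cache[accessor]
		if !ok {
			if resp := lookupMount(accessor); resp != nil {
				mi = *resp
			}
			// Misses are cached too, so a dangling accessor costs one lookup.
			cache[accessor] = mi
		}
		fmt.Printf("%s -> %+v\n", accessor, mi)
	}
}
```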
---
vault/identity_store_aliases.go | 48 ++++++-
vault/identity_store_aliases_ext_test.go | 159 +++++++++++++++++++++++
vault/identity_store_aliases_test.go | 56 --------
vault/identity_store_entities.go | 53 +++++++-
vault/identity_store_group_aliases.go | 38 +++++-
vault/identity_store_groups.go | 41 +++++-
vault/identity_store_groups_ext_test.go | 135 +++++++++++++++++++
vault/identity_store_schema.go | 14 --
8 files changed, 459 insertions(+), 85 deletions(-)
diff --git a/vault/identity_store_aliases.go b/vault/identity_store_aliases.go
index 5fea5de71..1f578df19 100644
--- a/vault/identity_store_aliases.go
+++ b/vault/identity_store_aliases.go
@@ -319,9 +319,11 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo
// Update the fields
alias.Name = aliasName
alias.Metadata = aliasMetadata
- alias.MountType = mountValidationResp.MountType
alias.MountAccessor = mountValidationResp.MountAccessor
- alias.MountPath = mountValidationResp.MountPath
+
+ // Explicitly set to empty as in the past we incorrectly saved it
+ alias.MountPath = ""
+ alias.MountType = ""
// Set the canonical ID in the alias index. This should be done after
// sanitizing entity.
@@ -377,13 +379,16 @@ func (i *IdentityStore) handleAliasReadCommon(alias *identity.Alias) (*logical.R
respData := map[string]interface{}{}
respData["id"] = alias.ID
respData["canonical_id"] = alias.CanonicalID
- respData["mount_type"] = alias.MountType
respData["mount_accessor"] = alias.MountAccessor
- respData["mount_path"] = alias.MountPath
respData["metadata"] = alias.Metadata
respData["name"] = alias.Name
respData["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs
+ if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+ respData["mount_path"] = mountValidationResp.MountPath
+ respData["mount_type"] = mountValidationResp.MountType
+ }
+
// Convert protobuf timestamp into RFC3339 format
respData["creation_time"] = ptypes.TimestampString(alias.CreationTime)
respData["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime)
@@ -416,15 +421,46 @@ func (i *IdentityStore) pathAliasIDList() framework.OperationFunc {
}
var aliasIDs []string
+ aliasInfo := map[string]interface{}{}
+
+ type mountInfo struct {
+ MountType string
+ MountPath string
+ }
+ mountAccessorMap := map[string]mountInfo{}
+
for {
raw := iter.Next()
if raw == nil {
break
}
- aliasIDs = append(aliasIDs, raw.(*identity.Alias).ID)
+ alias := raw.(*identity.Alias)
+ aliasIDs = append(aliasIDs, alias.ID)
+ aliasInfoEntry := map[string]interface{}{
+ "name": alias.Name,
+ "canonical_id": alias.CanonicalID,
+ "mount_accessor": alias.MountAccessor,
+ }
+
+ mi, ok := mountAccessorMap[alias.MountAccessor]
+ if ok {
+ aliasInfoEntry["mount_type"] = mi.MountType
+ aliasInfoEntry["mount_path"] = mi.MountPath
+ } else {
+ mi = mountInfo{}
+ if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+ mi.MountType = mountValidationResp.MountType
+ mi.MountPath = mountValidationResp.MountPath
+ aliasInfoEntry["mount_type"] = mi.MountType
+ aliasInfoEntry["mount_path"] = mi.MountPath
+ }
+ mountAccessorMap[alias.MountAccessor] = mi
+ }
+
+ aliasInfo[alias.ID] = aliasInfoEntry
}
- return logical.ListResponse(aliasIDs), nil
+ return logical.ListResponseWithInfo(aliasIDs, aliasInfo), nil
}
}
diff --git a/vault/identity_store_aliases_ext_test.go b/vault/identity_store_aliases_ext_test.go
index 4051257b0..37ed4cb77 100644
--- a/vault/identity_store_aliases_ext_test.go
+++ b/vault/identity_store_aliases_ext_test.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/builtin/credential/github"
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
)
@@ -60,3 +61,161 @@ func TestIdentityStore_EntityAliasLocalMount(t *testing.T) {
t.Fatalf("expected error since mount is local")
}
}
+
+func TestIdentityStore_ListAlias(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "github": github.Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0].Core
+ vault.TestWaitActive(t, core)
+ client := cluster.Cores[0].Client
+
+ err := client.Sys().EnableAuthWithOptions("github", &api.EnableAuthOptions{
+ Type: "github",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mounts, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var githubAccessor string
+ for k, v := range mounts {
+ t.Logf("key: %v\nmount: %#v", k, *v)
+ if k == "github/" {
+ githubAccessor = v.Accessor
+ break
+ }
+ }
+ if githubAccessor == "" {
+ t.Fatal("did not find github accessor")
+ }
+
+ resp, err := client.Logical().Write("identity/entity", nil)
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ if resp == nil {
+ t.Fatalf("expected a non-nil response")
+ }
+
+ entityID := resp.Data["id"].(string)
+
+ // Create an alias
+ resp, err = client.Logical().Write("identity/entity-alias", map[string]interface{}{
+ "name": "testaliasname",
+ "mount_accessor": githubAccessor,
+ })
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ testAliasCanonicalID := resp.Data["canonical_id"].(string)
+ testAliasAliasID := resp.Data["id"].(string)
+
+ resp, err = client.Logical().Write("identity/entity-alias", map[string]interface{}{
+ "name": "entityalias",
+ "mount_accessor": githubAccessor,
+ "canonical_id": entityID,
+ })
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ entityAliasAliasID := resp.Data["id"].(string)
+
+ resp, err = client.Logical().List("identity/entity-alias/id")
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ keys := resp.Data["keys"].([]interface{})
+ if len(keys) != 2 {
+ t.Fatalf("bad: length of alias IDs listed; expected: 2, actual: %d", len(keys))
+ }
+
+ // Do some due diligence on the key info
+ aliasInfoRaw, ok := resp.Data["key_info"]
+ if !ok {
+ t.Fatal("expected key_info map in response")
+ }
+ aliasInfo := aliasInfoRaw.(map[string]interface{})
+ for _, keyRaw := range keys {
+ key := keyRaw.(string)
+ infoRaw, ok := aliasInfo[key]
+ if !ok {
+ t.Fatal("expected key info")
+ }
+ info := infoRaw.(map[string]interface{})
+ currName := "entityalias"
+ if info["canonical_id"].(string) == testAliasCanonicalID {
+ currName = "testaliasname"
+ }
+ t.Logf("alias info: %#v", info)
+ switch {
+ case info["name"].(string) != currName:
+ t.Fatalf("bad name: %v", info["name"].(string))
+ case info["mount_accessor"].(string) != githubAccessor:
+ t.Fatalf("bad mount_path: %v", info["mount_accessor"].(string))
+ }
+ }
+
+ // Now do the same with entity info
+ resp, err = client.Logical().List("identity/entity/id")
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ keys = resp.Data["keys"].([]interface{})
+ if len(keys) != 2 {
+ t.Fatalf("bad: length of entity IDs listed; expected: 2, actual: %d", len(keys))
+ }
+
+ entityInfoRaw, ok := resp.Data["key_info"]
+ if !ok {
+ t.Fatal("expected key_info map in response")
+ }
+
+ // This is basically verifying that the entity has the alias in key_info
+ // that we expect to be tied to it, plus tests a value further down in it
+ // for fun
+ entityInfo := entityInfoRaw.(map[string]interface{})
+ for _, keyRaw := range keys {
+ key := keyRaw.(string)
+ infoRaw, ok := entityInfo[key]
+ if !ok {
+ t.Fatal("expected key info")
+ }
+ info := infoRaw.(map[string]interface{})
+ t.Logf("entity info: %#v", info)
+ currAliasID := entityAliasAliasID
+ if key == testAliasCanonicalID {
+ currAliasID = testAliasAliasID
+ }
+ currAliases := info["aliases"].([]interface{})
+ if len(currAliases) != 1 {
+ t.Fatal("bad aliases length")
+ }
+ for _, v := range currAliases {
+ curr := v.(map[string]interface{})
+ switch {
+ case curr["id"].(string) != currAliasID:
+ t.Fatalf("bad alias id: %v", curr["id"])
+ case curr["mount_accessor"].(string) != githubAccessor:
+ t.Fatalf("bad mount accessor: %v", curr["mount_accessor"])
+ case curr["mount_path"].(string) != "auth/github/":
+ t.Fatalf("bad mount path: %v", curr["mount_path"])
+ case curr["mount_type"].(string) != "github":
+ t.Fatalf("bad mount type: %v", curr["mount_type"])
+ }
+ }
+ }
+}
diff --git a/vault/identity_store_aliases_test.go b/vault/identity_store_aliases_test.go
index 081e8c3c0..8d088b3b1 100644
--- a/vault/identity_store_aliases_test.go
+++ b/vault/identity_store_aliases_test.go
@@ -9,62 +9,6 @@ import (
"github.com/hashicorp/vault/logical"
)
-func TestIdentityStore_ListAlias(t *testing.T) {
- var err error
- var resp *logical.Response
-
- is, githubAccessor, _ := testIdentityStoreWithGithubAuth(t)
-
- entityReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "entity",
- }
- resp, err = is.HandleRequest(context.Background(), entityReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp == nil {
- t.Fatalf("expected a non-nil response")
- }
- entityID := resp.Data["id"].(string)
-
- // Create an alias
- aliasData := map[string]interface{}{
- "name": "testaliasname",
- "mount_accessor": githubAccessor,
- }
- aliasReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "entity-alias",
- Data: aliasData,
- }
- resp, err = is.HandleRequest(context.Background(), aliasReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- aliasData["name"] = "entityalias"
- aliasData["entity_id"] = entityID
- resp, err = is.HandleRequest(context.Background(), aliasReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- listReq := &logical.Request{
- Operation: logical.ListOperation,
- Path: "entity-alias/id",
- }
- resp, err = is.HandleRequest(context.Background(), listReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- keys := resp.Data["keys"].([]string)
- if len(keys) != 2 {
- t.Fatalf("bad: length of alias IDs listed; expected: 2, actual: %d", len(keys))
- }
-}
-
// This test is required because MemDB does not take care of ensuring
// uniqueness of indexes that are marked unique.
func TestIdentityStore_AliasSameAliasNames(t *testing.T) {
diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go
index fccb5d3eb..5096ed0b0 100644
--- a/vault/identity_store_entities.go
+++ b/vault/identity_store_entities.go
@@ -459,14 +459,18 @@ func (i *IdentityStore) handleEntityReadCommon(entity *identity.Entity) (*logica
aliasMap := map[string]interface{}{}
aliasMap["id"] = alias.ID
aliasMap["canonical_id"] = alias.CanonicalID
- aliasMap["mount_type"] = alias.MountType
aliasMap["mount_accessor"] = alias.MountAccessor
- aliasMap["mount_path"] = alias.MountPath
aliasMap["metadata"] = alias.Metadata
aliasMap["name"] = alias.Name
aliasMap["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs
aliasMap["creation_time"] = ptypes.TimestampString(alias.CreationTime)
aliasMap["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime)
+
+ if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+ aliasMap["mount_type"] = mountValidationResp.MountType
+ aliasMap["mount_path"] = mountValidationResp.MountPath
+ }
+
aliasesToReturn[aliasIdx] = aliasMap
}
@@ -522,15 +526,56 @@ func (i *IdentityStore) pathEntityIDList() framework.OperationFunc {
}
var entityIDs []string
+ entityInfo := map[string]interface{}{}
+
+ type mountInfo struct {
+ MountType string
+ MountPath string
+ }
+ mountAccessorMap := map[string]mountInfo{}
+
for {
raw := iter.Next()
if raw == nil {
break
}
- entityIDs = append(entityIDs, raw.(*identity.Entity).ID)
+ entity := raw.(*identity.Entity)
+ entityIDs = append(entityIDs, entity.ID)
+ entityInfoEntry := map[string]interface{}{
+ "name": entity.Name,
+ }
+ if len(entity.Aliases) > 0 {
+ aliasList := make([]interface{}, 0, len(entity.Aliases))
+ for _, alias := range entity.Aliases {
+ entry := map[string]interface{}{
+ "id": alias.ID,
+ "name": alias.Name,
+ "mount_accessor": alias.MountAccessor,
+ }
+
+ mi, ok := mountAccessorMap[alias.MountAccessor]
+ if ok {
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ } else {
+ mi = mountInfo{}
+ if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+ mi.MountType = mountValidationResp.MountType
+ mi.MountPath = mountValidationResp.MountPath
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ }
+ mountAccessorMap[alias.MountAccessor] = mi
+ }
+
+ aliasList = append(aliasList, entry)
+ }
+ entityInfoEntry["aliases"] = aliasList
+ }
+ entityInfo[entity.ID] = entityInfoEntry
}
- return logical.ListResponse(entityIDs), nil
+ return logical.ListResponseWithInfo(entityIDs, entityInfo), nil
}
}
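A sketch of the `key_info` entry the entity list handler above now builds per entity ID. All IDs and the accessor below are illustrative, and `mount_type`/`mount_path` appear only when the accessor still resolves to a live mount.

```go
package main

import "fmt"

func main() {
	// Illustrative values only; real IDs are generated by Vault.
	keyInfoEntry := map[string]interface{}{
		"name": "myentity",
		"aliases": []interface{}{
			map[string]interface{}{
				"id":             "alias-id-1",
				"name":           "testaliasname",
				"mount_accessor": "auth_github_1234",
				"mount_type":     "github",
				"mount_path":     "auth/github/",
			},
		},
	}
	fmt.Printf("%#v\n", keyInfoEntry)
}
```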
diff --git a/vault/identity_store_group_aliases.go b/vault/identity_store_group_aliases.go
index 6c8962417..047fc4799 100644
--- a/vault/identity_store_group_aliases.go
+++ b/vault/identity_store_group_aliases.go
@@ -210,8 +210,9 @@ func (i *IdentityStore) handleGroupAliasUpdateCommon(req *logical.Request, d *fr
}
group.Alias.Name = groupAliasName
- group.Alias.MountType = mountValidationResp.MountType
group.Alias.MountAccessor = mountValidationResp.MountAccessor
+ // Explicitly set to empty as previous versions incorrectly persisted it
+ group.Alias.MountType = ""
err = i.sanitizeAndUpsertGroup(group, nil)
if err != nil {
@@ -267,15 +268,46 @@ func (i *IdentityStore) pathGroupAliasIDList() framework.OperationFunc {
}
var groupAliasIDs []string
+ aliasInfo := map[string]interface{}{}
+
+ type mountInfo struct {
+ MountType string
+ MountPath string
+ }
+ mountAccessorMap := map[string]mountInfo{}
+
for {
raw := iter.Next()
if raw == nil {
break
}
- groupAliasIDs = append(groupAliasIDs, raw.(*identity.Alias).ID)
+ alias := raw.(*identity.Alias)
+ groupAliasIDs = append(groupAliasIDs, alias.ID)
+ entry := map[string]interface{}{
+ "name": alias.Name,
+ "canonical_id": alias.CanonicalID,
+ "mount_accessor": alias.MountAccessor,
+ }
+
+ mi, ok := mountAccessorMap[alias.MountAccessor]
+ if ok {
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ } else {
+ mi = mountInfo{}
+ if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
+ mi.MountType = mountValidationResp.MountType
+ mi.MountPath = mountValidationResp.MountPath
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ }
+ mountAccessorMap[alias.MountAccessor] = mi
+ }
+
+ aliasInfo[alias.ID] = entry
}
- return logical.ListResponse(groupAliasIDs), nil
+ return logical.ListResponseWithInfo(groupAliasIDs, aliasInfo), nil
}
}
diff --git a/vault/identity_store_groups.go b/vault/identity_store_groups.go
index b492d87dd..8bd48d6f8 100644
--- a/vault/identity_store_groups.go
+++ b/vault/identity_store_groups.go
@@ -331,15 +331,52 @@ func (i *IdentityStore) pathGroupIDList() framework.OperationFunc {
}
var groupIDs []string
+ groupInfo := map[string]interface{}{}
+
+ type mountInfo struct {
+ MountType string
+ MountPath string
+ }
+ mountAccessorMap := map[string]mountInfo{}
+
for {
raw := iter.Next()
if raw == nil {
break
}
- groupIDs = append(groupIDs, raw.(*identity.Group).ID)
+ group := raw.(*identity.Group)
+ groupIDs = append(groupIDs, group.ID)
+ groupInfoEntry := map[string]interface{}{
+ "name": group.Name,
+ }
+ if group.Alias != nil {
+ entry := map[string]interface{}{
+ "id": group.Alias.ID,
+ "name": group.Alias.Name,
+ "mount_accessor": group.Alias.MountAccessor,
+ }
+
+ mi, ok := mountAccessorMap[group.Alias.MountAccessor]
+ if ok {
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ } else {
+ mi = mountInfo{}
+ if mountValidationResp := i.core.router.validateMountByAccessor(group.Alias.MountAccessor); mountValidationResp != nil {
+ mi.MountType = mountValidationResp.MountType
+ mi.MountPath = mountValidationResp.MountPath
+ entry["mount_type"] = mi.MountType
+ entry["mount_path"] = mi.MountPath
+ }
+ mountAccessorMap[group.Alias.MountAccessor] = mi
+ }
+
+ groupInfoEntry["alias"] = entry
+ }
+ groupInfo[group.ID] = groupInfoEntry
}
- return logical.ListResponse(groupIDs), nil
+ return logical.ListResponseWithInfo(groupIDs, groupInfo), nil
}
}
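A consumer-side sketch, assuming an authenticated Vault Go API client, of listing group IDs and reading the new `key_info` map, as the tests in this patch do:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Logical().List("identity/group/id")
	if err != nil {
		log.Fatal(err)
	}
	if resp == nil {
		log.Fatal("no groups found")
	}
	keys := resp.Data["keys"].([]interface{})
	keyInfo := resp.Data["key_info"].(map[string]interface{})
	for _, k := range keys {
		fmt.Printf("%s => %#v\n", k.(string), keyInfo[k.(string)])
	}
}
```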
diff --git a/vault/identity_store_groups_ext_test.go b/vault/identity_store_groups_ext_test.go
index 372454779..3881a9f01 100644
--- a/vault/identity_store_groups_ext_test.go
+++ b/vault/identity_store_groups_ext_test.go
@@ -8,9 +8,144 @@ import (
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/builtin/credential/github"
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
)
+func TestIdentityStore_ListGroupAlias(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "github": github.Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0].Core
+ vault.TestWaitActive(t, core)
+ client := cluster.Cores[0].Client
+
+ err := client.Sys().EnableAuthWithOptions("github", &api.EnableAuthOptions{
+ Type: "github",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mounts, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var githubAccessor string
+ for k, v := range mounts {
+ t.Logf("key: %v\nmount: %#v", k, *v)
+ if k == "github/" {
+ githubAccessor = v.Accessor
+ break
+ }
+ }
+ if githubAccessor == "" {
+ t.Fatal("did not find github accessor")
+ }
+
+ resp, err := client.Logical().Write("identity/group", map[string]interface{}{
+ "type": "external",
+ })
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ groupID := resp.Data["id"].(string)
+
+ resp, err = client.Logical().Write("identity/group-alias", map[string]interface{}{
+ "name": "groupalias",
+ "mount_accessor": githubAccessor,
+ "canonical_id": groupID,
+ })
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+ aliasID := resp.Data["id"].(string)
+
+ resp, err = client.Logical().List("identity/group-alias/id")
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ keys := resp.Data["keys"].([]interface{})
+ if len(keys) != 1 {
+ t.Fatalf("bad: length of alias IDs listed; expected: 1, actual: %d", len(keys))
+ }
+
+ // Do some due diligence on the key info
+ aliasInfoRaw, ok := resp.Data["key_info"]
+ if !ok {
+ t.Fatal("expected key_info map in response")
+ }
+ aliasInfo := aliasInfoRaw.(map[string]interface{})
+ if len(aliasInfo) != 1 {
+ t.Fatalf("bad: length of alias ID key info; expected: 1, actual: %d", len(aliasInfo))
+ }
+
+ infoRaw, ok := aliasInfo[aliasID]
+ if !ok {
+ t.Fatal("expected to find alias ID in key info map")
+ }
+ info := infoRaw.(map[string]interface{})
+ t.Logf("alias info: %#v", info)
+ switch {
+ case info["name"].(string) != "groupalias":
+ t.Fatalf("bad name: %v", info["name"].(string))
+ case info["mount_accessor"].(string) != githubAccessor:
+ t.Fatalf("bad mount_accessor: %v", info["mount_accessor"].(string))
+ }
+
+ // Now do the same with group info
+ resp, err = client.Logical().List("identity/group/id")
+ if err != nil {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ keys = resp.Data["keys"].([]interface{})
+ if len(keys) != 1 {
+ t.Fatalf("bad: length of group IDs listed; expected: 1, actual: %d", len(keys))
+ }
+
+ groupInfoRaw, ok := resp.Data["key_info"]
+ if !ok {
+ t.Fatal("expected key_info map in response")
+ }
+
+ // This is basically verifying that the group has the alias in key_info
+ // that we expect to be tied to it, plus tests a value further down in it
+ // for fun
+ groupInfo := groupInfoRaw.(map[string]interface{})
+ if len(groupInfo) != 1 {
+ t.Fatalf("bad: length of group ID key info; expected: 1, actual: %d", len(groupInfo))
+ }
+
+ infoRaw, ok = groupInfo[groupID]
+ if !ok {
+ t.Fatal("expected key info")
+ }
+ info = infoRaw.(map[string]interface{})
+ t.Logf("group info: %#v", info)
+ alias := info["alias"].(map[string]interface{})
+ switch {
+ case alias["id"].(string) != aliasID:
+ t.Fatalf("bad alias id: %v", alias["id"])
+ case alias["mount_accessor"].(string) != githubAccessor:
+ t.Fatalf("bad mount accessor: %v", alias["mount_accessor"])
+ case alias["mount_path"].(string) != "auth/github/":
+ t.Fatalf("bad mount path: %v", alias["mount_path"])
+ case alias["mount_type"].(string) != "github":
+ t.Fatalf("bad mount type: %v", alias["mount_type"])
+ }
+}
+
// Testing the fix for GH-4351
func TestIdentityStore_ExternalGroupMembershipsAcrossMounts(t *testing.T) {
coreConfig := &vault.CoreConfig{
diff --git a/vault/identity_store_schema.go b/vault/identity_store_schema.go
index 33bbae4d8..3fce0bf35 100644
--- a/vault/identity_store_schema.go
+++ b/vault/identity_store_schema.go
@@ -54,13 +54,6 @@ func aliasesTableSchema() *memdb.TableSchema {
Field: "CanonicalID",
},
},
- "mount_type": &memdb.IndexSchema{
- Name: "mount_type",
- Unique: false,
- Indexer: &memdb.StringFieldIndex{
- Field: "MountType",
- },
- },
"factors": &memdb.IndexSchema{
Name: "factors",
Unique: true,
@@ -205,13 +198,6 @@ func groupAliasesTableSchema() *memdb.TableSchema {
Field: "CanonicalID",
},
},
- "mount_type": &memdb.IndexSchema{
- Name: "mount_type",
- Unique: false,
- Indexer: &memdb.StringFieldIndex{
- Field: "MountType",
- },
- },
"factors": &memdb.IndexSchema{
Name: "factors",
Unique: true,
From 12976bf60e0973a49e42437e50c79ed6a83d79b7 Mon Sep 17 00:00:00 2001
From: Becca Petrin
Date: Fri, 25 May 2018 11:35:09 -0700
Subject: [PATCH 37/39] add userpass note on bound cidrs (#4610)
---
website/source/api/auth/userpass/index.html.md | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/website/source/api/auth/userpass/index.html.md b/website/source/api/auth/userpass/index.html.md
index b7ecad404..593975b67 100644
--- a/website/source/api/auth/userpass/index.html.md
+++ b/website/source/api/auth/userpass/index.html.md
@@ -34,13 +34,17 @@ Create a new user or update an existing user. This path honors the distinction b
string, only the `default` policy will be applicable to the user.
- `ttl` `(string: "")` - The lease duration which decides login expiration.
- `max_ttl` `(string: "")` - Maximum duration after which login should expire.
+- `bound_cidrs` `(string: "", or list: [])` – If set, restricts usage of the
+ login and token to client IPs falling within the range of the specified
+ CIDR(s).
### Sample Payload
```json
{
"password": "superSecretPassword",
- "policies": "admin,default"
+ "policies": "admin,default",
+ "bound_cidrs": ["127.0.0.1/32", "128.252.0.0/16"]
}
```
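The same setting via the Go API client, as a minimal sketch; the username `alice` is a placeholder and the mount is assumed to be at the default `auth/userpass` path:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	_, err = client.Logical().Write("auth/userpass/users/alice", map[string]interface{}{
		"password":    "superSecretPassword",
		"policies":    "admin,default",
		"bound_cidrs": []string{"127.0.0.1/32", "128.252.0.0/16"},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```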
From 94ae5d2567f67f022d171c8f976293d3e4b350db Mon Sep 17 00:00:00 2001
From: Becca Petrin
Date: Fri, 25 May 2018 11:37:41 -0700
Subject: [PATCH 38/39] Add Active Directory secrets plugin (#4635)
---
command/commands.go | 2 +
.../hashicorp/vault-plugin-secrets-ad/LICENSE | 373 ++++
.../vault-plugin-secrets-ad/plugin/backend.go | 80 +
.../plugin/client/client.go | 140 ++
.../plugin/client/entry.go | 41 +
.../plugin/client/fieldregistry.go | 113 ++
.../plugin/client/time.go | 43 +
.../plugin/engineconf.go | 10 +
.../plugin/passwordconf.go | 15 +
.../plugin/path_config.go | 201 ++
.../plugin/path_creds.go | 216 +++
.../plugin/path_roles.go | 254 +++
.../vault-plugin-secrets-ad/plugin/role.go | 28 +
.../plugin/util/passwords.go | 38 +
.../plugin/util/secrets_client.go | 73 +
vendor/golang.org/x/text/encoding/encoding.go | 335 ++++
.../internal/identifier/identifier.go | 81 +
.../text/encoding/internal/identifier/mib.go | 1621 +++++++++++++++++
.../x/text/encoding/internal/internal.go | 75 +
.../x/text/encoding/unicode/override.go | 82 +
.../x/text/encoding/unicode/unicode.go | 434 +++++
.../internal/utf8internal/utf8internal.go | 87 +
vendor/golang.org/x/text/runes/cond.go | 187 ++
vendor/golang.org/x/text/runes/runes.go | 355 ++++
vendor/vendor.json | 18 +
25 files changed, 4902 insertions(+)
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/LICENSE
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/client.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/entry.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/fieldregistry.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/time.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/role.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go
create mode 100644 vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/secrets_client.go
create mode 100644 vendor/golang.org/x/text/encoding/encoding.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go
create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go
create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go
create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
create mode 100644 vendor/golang.org/x/text/runes/cond.go
create mode 100644 vendor/golang.org/x/text/runes/runes.go
diff --git a/command/commands.go b/command/commands.go
index a02cdda9b..9e092ed47 100644
--- a/command/commands.go
+++ b/command/commands.go
@@ -6,6 +6,7 @@ import (
"os/signal"
"syscall"
+ ad "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
gcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin"
kv "github.com/hashicorp/vault-plugin-secrets-kv"
"github.com/hashicorp/vault/audit"
@@ -110,6 +111,7 @@ var (
}
logicalBackends = map[string]logical.Factory{
+ "ad": ad.Factory,
"aws": aws.Factory,
"cassandra": cassandra.Factory,
"consul": consul.Factory,
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/LICENSE b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/LICENSE
new file mode 100644
index 000000000..a612ad981
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
new file mode 100644
index 000000000..1ed1143fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/backend.go
@@ -0,0 +1,80 @@
+package plugin
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
+ "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
+ "github.com/hashicorp/vault/helper/ldaputil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ "github.com/patrickmn/go-cache"
+)
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+ backend := newBackend(util.NewSecretsClient(conf.Logger))
+ backend.Setup(ctx, conf)
+ return backend, nil
+}
+
+func newBackend(client secretsClient) *backend {
+ adBackend := &backend{
+ client: client,
+ roleCache: cache.New(roleCacheExpiration, roleCacheCleanup),
+ credCache: cache.New(credCacheExpiration, credCacheCleanup),
+ }
+ adBackend.Backend = &framework.Backend{
+ Help: backendHelp,
+ Paths: []*framework.Path{
+ adBackend.pathConfig(),
+ adBackend.pathRoles(),
+ adBackend.pathListRoles(),
+ adBackend.pathCreds(),
+ },
+ PathsSpecial: &logical.Paths{
+ SealWrapStorage: []string{
+ configPath,
+ credPrefix,
+ },
+ },
+ Invalidate: adBackend.Invalidate,
+ BackendType: logical.TypeLogical,
+ }
+ return adBackend
+}
+
+type backend struct {
+ logical.Backend
+
+ client secretsClient
+
+ roleCache *cache.Cache
+ credCache *cache.Cache
+ credLock sync.Mutex
+}
+
+func (b *backend) Invalidate(ctx context.Context, key string) {
+ b.invalidateRole(ctx, key)
+ b.invalidateCred(ctx, key)
+}
+
+// secretsClient wraps the *util.SecretsClient in an interface to support testing.
+type secretsClient interface {
+ Get(conf *ldaputil.ConfigEntry, serviceAccountName string) (*client.Entry, error)
+ GetPasswordLastSet(conf *ldaputil.ConfigEntry, serviceAccountName string) (time.Time, error)
+ UpdatePassword(conf *ldaputil.ConfigEntry, serviceAccountName string, newPassword string) error
+}
+
+const backendHelp = `
+The Active Directory (AD) secrets engine rotates AD passwords dynamically,
+and is designed for a high-load environment where many instances may be accessing
+a shared password simultaneously. With a simple setup and a simple creds API,
+it doesn't require instances to be manually registered in advance to gain access.
+As long as access has been granted to the creds path via a method like
+AppRole, the credentials are available.
+
+Passwords are lazily rotated based on preset TTLs and can have a length configured to meet
+your needs.
+`
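Because the backend depends only on the small `secretsClient` interface above, tests can substitute a fake. A sketch (the type and its canned values are hypothetical, not part of the plugin):

```go
package plugin

import (
	"time"

	"github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
	"github.com/hashicorp/vault/helper/ldaputil"
)

// fakeSecretsClient satisfies secretsClient without touching LDAP.
type fakeSecretsClient struct {
	entry     *client.Entry
	lastSet   time.Time
	updateErr error
}

func (f *fakeSecretsClient) Get(conf *ldaputil.ConfigEntry, serviceAccountName string) (*client.Entry, error) {
	return f.entry, nil
}

func (f *fakeSecretsClient) GetPasswordLastSet(conf *ldaputil.ConfigEntry, serviceAccountName string) (time.Time, error) {
	return f.lastSet, nil
}

func (f *fakeSecretsClient) UpdatePassword(conf *ldaputil.ConfigEntry, serviceAccountName string, newPassword string) error {
	return f.updateErr
}
```

Then `newBackend(&fakeSecretsClient{})` exercises the paths without a directory server.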
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/client.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/client.go
new file mode 100644
index 000000000..a0fa2f2f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/client.go
@@ -0,0 +1,140 @@
+package client
+
+import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/go-errors/errors"
+ "github.com/go-ldap/ldap"
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/helper/ldaputil"
+ "golang.org/x/text/encoding/unicode"
+)
+
+func NewClient(logger hclog.Logger) *Client {
+ return &Client{
+ ldap: &ldaputil.Client{
+ Logger: logger,
+ LDAP: ldaputil.NewLDAP(),
+ },
+ }
+}
+
+type Client struct {
+ ldap *ldaputil.Client
+}
+
+func (c *Client) Search(cfg *ldaputil.ConfigEntry, filters map[*Field][]string) ([]*Entry, error) {
+ req := &ldap.SearchRequest{
+ BaseDN: cfg.UserDN,
+ Scope: ldap.ScopeWholeSubtree,
+ Filter: toString(filters),
+ SizeLimit: math.MaxInt32,
+ }
+
+ conn, err := c.ldap.DialLDAP(cfg)
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+
+ if err := bind(cfg, conn); err != nil {
+ return nil, err
+ }
+
+ result, err := conn.Search(req)
+ if err != nil {
+ return nil, err
+ }
+
+ entries := make([]*Entry, len(result.Entries))
+ for i, rawEntry := range result.Entries {
+ entries[i] = NewEntry(rawEntry)
+ }
+ return entries, nil
+}
+
+func (c *Client) UpdateEntry(cfg *ldaputil.ConfigEntry, filters map[*Field][]string, newValues map[*Field][]string) error {
+ entries, err := c.Search(cfg, filters)
+ if err != nil {
+ return err
+ }
+ if len(entries) != 1 {
+ return fmt.Errorf("filter of %s doesn't match just one entry: %s", filters, entries)
+ }
+
+ replaceAttributes := make([]ldap.PartialAttribute, len(newValues))
+ i := 0
+ for field, vals := range newValues {
+ replaceAttributes[i] = ldap.PartialAttribute{
+ Type: field.String(),
+ Vals: vals,
+ }
+ i++
+ }
+
+ modifyReq := &ldap.ModifyRequest{
+ DN: entries[0].DN,
+ ReplaceAttributes: replaceAttributes,
+ }
+
+ conn, err := c.ldap.DialLDAP(cfg)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ if err := bind(cfg, conn); err != nil {
+ return err
+ }
+ return conn.Modify(modifyReq)
+}
+
+// UpdatePassword uses a Modify call under the hood because
+// Active Directory doesn't recognize the passwordModify method.
+// See https://github.com/go-ldap/ldap/issues/106
+// for more.
+func (c *Client) UpdatePassword(cfg *ldaputil.ConfigEntry, filters map[*Field][]string, newPassword string) error {
+ pwdEncoded, err := formatPassword(newPassword)
+ if err != nil {
+ return err
+ }
+
+ newValues := map[*Field][]string{
+ FieldRegistry.UnicodePassword: {pwdEncoded},
+ }
+
+ return c.UpdateEntry(cfg, filters, newValues)
+}
+
+// According to the MS docs, the password needs to be utf16 and enclosed in quotes.
+func formatPassword(original string) (string, error) {
+ utf16 := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
+ return utf16.NewEncoder().String("\"" + original + "\"")
+}
+
+// Ex. "(cn=Ellen Jones)"
+func toString(filters map[*Field][]string) string {
+ var fieldEquals []string
+ for f, values := range filters {
+ for _, v := range values {
+ fieldEquals = append(fieldEquals, fmt.Sprintf("%s=%s", f, v))
+ }
+ }
+ result := strings.Join(fieldEquals, ",")
+ return "(" + result + ")"
+}
+
+func bind(cfg *ldaputil.ConfigEntry, conn ldaputil.Connection) error {
+ if cfg.BindPassword == "" {
+ return errors.New("unable to bind due to lack of configured password")
+ }
+ if cfg.UPNDomain != "" {
+ return conn.Bind(fmt.Sprintf("%s@%s", ldaputil.EscapeLDAPValue(cfg.BindDN), cfg.UPNDomain), cfg.BindPassword)
+ }
+ if cfg.BindDN != "" {
+ return conn.Bind(cfg.BindDN, cfg.BindPassword)
+ }
+ return errors.New("must provide binddn or upndomain")
+}
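A usage sketch of the client above: filter on `sAMAccountName` and rotate the password. All connection values are placeholders; `UpdatePassword` UTF-16LE-encodes and quotes the new password per the MS requirement noted in the code, then issues a Modify on `unicodePwd`.

```go
package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
	adclient "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
	"github.com/hashicorp/vault/helper/ldaputil"
)

func main() {
	c := adclient.NewClient(hclog.Default())
	cfg := &ldaputil.ConfigEntry{
		Url:          "ldaps://ad.example.com", // placeholder host
		UserDN:       "dc=example,dc=com",
		BindDN:       "cn=vault,ou=svc,dc=example,dc=com",
		BindPassword: "placeholder",
	}
	filters := map[*adclient.Field][]string{
		adclient.FieldRegistry.SAMAccountName: {"jones"},
	}
	if err := c.UpdatePassword(cfg, filters, "n3w-p4ssw0rd"); err != nil {
		log.Fatal(err)
	}
}
```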
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/entry.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/entry.go
new file mode 100644
index 000000000..a928ad050
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/entry.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+ "strings"
+
+ "github.com/go-ldap/ldap"
+)
+
+// NewEntry wraps an *ldap.Entry in an Entry, an Active Directory-specific
+// construct that makes knowing and grabbing fields more convenient,
+// while retaining all original information.
+func NewEntry(ldapEntry *ldap.Entry) *Entry {
+ fieldMap := make(map[string][]string)
+ for _, attribute := range ldapEntry.Attributes {
+ field := FieldRegistry.Parse(attribute.Name)
+ if field == nil {
+ // This field simply isn't in the registry, no big deal.
+ continue
+ }
+ fieldMap[field.String()] = attribute.Values
+ }
+ return &Entry{fieldMap: fieldMap, Entry: ldapEntry}
+}
+
+type Entry struct {
+ *ldap.Entry
+ fieldMap map[string][]string
+}
+
+func (e *Entry) Get(field *Field) ([]string, bool) {
+ values, found := e.fieldMap[field.String()]
+ return values, found
+}
+
+func (e *Entry) GetJoined(field *Field) (string, bool) {
+ values, found := e.Get(field)
+ if !found {
+ return "", false
+ }
+ return strings.Join(values, ","), true
+}
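A small sketch of `Entry` in use; here the `*ldap.Entry` is built by hand, where in practice it comes back from a search:

```go
package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
	adclient "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
)

func main() {
	raw := &ldap.Entry{
		DN: "cn=Ellen Jones,dc=example,dc=com",
		Attributes: []*ldap.EntryAttribute{
			{Name: "memberOf", Values: []string{"cn=admins", "cn=users"}},
		},
	}
	e := adclient.NewEntry(raw)
	if joined, ok := e.GetJoined(adclient.FieldRegistry.MemberOf); ok {
		fmt.Println(joined) // cn=admins,cn=users
	}
}
```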
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/fieldregistry.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/fieldregistry.go
new file mode 100644
index 000000000..4baf1e0a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/fieldregistry.go
@@ -0,0 +1,113 @@
+package client
+
+import (
+ "reflect"
+)
+
+// FieldRegistry is designed to look and feel
+// like an enum from another language like Python.
+//
+// Example: Accessing constants
+//
+// FieldRegistry.AccountExpires
+// FieldRegistry.BadPasswordCount
+//
+// Example: Utility methods
+//
+// FieldRegistry.List()
+// FieldRegistry.Parse("givenName")
+//
+var FieldRegistry = newFieldRegistry()
+
+// newFieldRegistry iterates through all the fields in the registry,
+// pulls their ldap strings, and sets up each field to contain its ldap string
+func newFieldRegistry() *fieldRegistry {
+ reg := &fieldRegistry{}
+ vOfReg := reflect.ValueOf(reg)
+
+ registryFields := vOfReg.Elem()
+ for i := 0; i < registryFields.NumField(); i++ {
+
+ if registryFields.Field(i).Kind() == reflect.Ptr {
+
+ field := registryFields.Type().Field(i)
+ ldapString := field.Tag.Get("ldap")
+ ldapField := &Field{ldapString}
+ vOfLDAPField := reflect.ValueOf(ldapField)
+
+ registryFields.FieldByName(field.Name).Set(vOfLDAPField)
+
+ reg.fieldList = append(reg.fieldList, ldapField)
+ }
+ }
+ return reg
+}
+
+// fieldRegistry isn't currently intended to be an exhaustive list -
+// there are more fields in ActiveDirectory. However, these are the ones
+// that may be useful to Vault. Feel free to add to this list!
+type fieldRegistry struct {
+ AccountExpires *Field `ldap:"accountExpires"`
+ AdminCount *Field `ldap:"adminCount"`
+ BadPasswordCount *Field `ldap:"badPwdCount"`
+ BadPasswordTime *Field `ldap:"badPasswordTime"`
+ CodePage *Field `ldap:"codePage"`
+ CommonName *Field `ldap:"cn"`
+ CountryCode *Field `ldap:"countryCode"`
+ DisplayName *Field `ldap:"displayName"`
+ DistinguishedName *Field `ldap:"distinguishedName"`
+ DomainComponent *Field `ldap:"dc"`
+ DomainName *Field `ldap:"dn"`
+ DSCorePropogationData *Field `ldap:"dSCorePropagationData"`
+ GivenName *Field `ldap:"givenName"`
+ GroupType *Field `ldap:"groupType"`
+ Initials *Field `ldap:"initials"`
+ InstanceType *Field `ldap:"instanceType"`
+ LastLogoff *Field `ldap:"lastLogoff"`
+ LastLogon *Field `ldap:"lastLogon"`
+ LastLogonTimestamp *Field `ldap:"lastLogonTimestamp"`
+ LockoutTime *Field `ldap:"lockoutTime"`
+ LogonCount *Field `ldap:"logonCount"`
+ MemberOf *Field `ldap:"memberOf"`
+ Name *Field `ldap:"name"`
+ ObjectCategory *Field `ldap:"objectCategory"`
+ ObjectClass *Field `ldap:"objectClass"`
+ ObjectGUID *Field `ldap:"objectGUID"`
+ ObjectSID *Field `ldap:"objectSid"`
+ OrganizationalUnit *Field `ldap:"ou"`
+ PasswordLastSet *Field `ldap:"pwdLastSet"`
+ PrimaryGroupID *Field `ldap:"primaryGroupID"`
+ SAMAccountName *Field `ldap:"sAMAccountName"`
+ SAMAccountType *Field `ldap:"sAMAccountType"`
+ Surname *Field `ldap:"sn"`
+ UnicodePassword *Field `ldap:"unicodePwd"`
+ UpdateSequenceNumberChanged *Field `ldap:"uSNChanged"`
+ UpdateSequenceNumberCreated *Field `ldap:"uSNCreated"`
+ UserAccountControl *Field `ldap:"userAccountControl"`
+ UserPrincipalName *Field `ldap:"userPrincipalName"`
+ WhenCreated *Field `ldap:"whenCreated"`
+ WhenChanged *Field `ldap:"whenChanged"`
+
+ fieldList []*Field
+}
+
+func (r *fieldRegistry) List() []*Field {
+ return r.fieldList
+}
+
+func (r *fieldRegistry) Parse(s string) *Field {
+ for _, f := range r.List() {
+ if f.String() == s {
+ return f
+ }
+ }
+ return nil
+}
+
+type Field struct {
+ str string
+}
+
+func (f *Field) String() string {
+ return f.str
+}
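Since the registry builds itself from the `ldap` struct tags via reflection, `Parse` is a linear scan over `List()`. A quick sketch of both:

```go
package main

import (
	"fmt"

	adclient "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
)

func main() {
	fmt.Println(len(adclient.FieldRegistry.List()))          // count of registered fields
	fmt.Println(adclient.FieldRegistry.Parse("givenName"))   // givenName
	fmt.Println(adclient.FieldRegistry.Parse("unknownAttr")) // <nil>
}
```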
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/time.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/time.go
new file mode 100644
index 000000000..c451ba473
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/client/time.go
@@ -0,0 +1,43 @@
+package client
+
+import (
+ "strconv"
+ "time"
+)
+
+const (
+ nanoSecondsPerSecond = 1000000000
+ nanosInTick = 100
+ ticksPerSecond = nanoSecondsPerSecond / nanosInTick
+)
+
+// ParseTicks parses dates represented as Active Directory LargeInts into times.
+// Not all time fields are represented this way,
+// so be sure to test that your particular time returns expected results.
+// Some time fields represented as LargeInts include accountExpires, lastLogon, lastLogonTimestamp, and pwdLastSet.
+// More: https://social.technet.microsoft.com/wiki/contents/articles/31135.active-directory-large-integer-attributes.aspx
+func ParseTicks(ticks string) (time.Time, error) {
+ i, err := strconv.ParseInt(ticks, 10, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return TicksToTime(i), nil
+}
+
+// TicksToTime converts an Active Directory time in ticks to a time.
+// This algorithm is summarized as:
+//
+// Many dates are saved in Active Directory as Large Integer values.
+// These attributes represent dates as the number of 100-nanosecond intervals since 12:00 AM January 1, 1601.
+// 100-nanosecond intervals, equal to 0.0000001 seconds, are also called ticks.
+// Dates in Active Directory are always saved in Coordinated Universal Time, or UTC.
+// More: https://social.technet.microsoft.com/wiki/contents/articles/31135.active-directory-large-integer-attributes.aspx
+//
+// If we directly follow the above algorithm we encounter time.Duration limits of 290 years and int overflow issues.
+// Thus below, we carefully sidestep those.
+func TicksToTime(ticks int64) time.Time {
+ origin := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
+ secondsSinceOrigin := ticks / ticksPerSecond
+ remainingNanoseconds := ticks % ticksPerSecond * 100
+ return time.Unix(origin+secondsSinceOrigin, remainingNanoseconds).UTC()
+}
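
As a sanity check of the arithmetic above: the Unix epoch falls 11644473600
seconds after the Active Directory origin of 1601-01-01, which at 10^7 ticks
per second is 116444736000000000 ticks. A small sketch:

    package main

    import (
        "fmt"

        "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
    )

    func main() {
        // 116444736000000000 ticks = 11644473600 seconds after 1601-01-01.
        t, err := client.ParseTicks("116444736000000000")
        if err != nil {
            panic(err)
        }
        fmt.Println(t) // 1970-01-01 00:00:00 +0000 UTC
    }
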
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
new file mode 100644
index 000000000..8e84807d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/engineconf.go
@@ -0,0 +1,10 @@
+package plugin
+
+import (
+ "github.com/hashicorp/vault/helper/ldaputil"
+)
+
+type configuration struct {
+ PasswordConf *passwordConf
+ ADConf *ldaputil.ConfigEntry
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
new file mode 100644
index 000000000..83ebcaed5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/passwordconf.go
@@ -0,0 +1,15 @@
+package plugin
+
+type passwordConf struct {
+ TTL int `json:"ttl"`
+ MaxTTL int `json:"max_ttl"`
+ Length int `json:"length"`
+}
+
+func (c *passwordConf) Map() map[string]interface{} {
+ return map[string]interface{}{
+ "ttl": c.TTL,
+ "max_ttl": c.MaxTTL,
+ "length": c.Length,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
new file mode 100644
index 000000000..2df419b61
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_config.go
@@ -0,0 +1,201 @@
+package plugin
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
+ "github.com/hashicorp/vault/helper/ldaputil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ configPath = "config"
+ configStorageKey = "config"
+
+ // This length is arbitrarily chosen but should work for
+ // most Active Directory minimum and maximum length settings.
+ // A bit tongue-in-cheek since programmers love their base-2 exponents.
+ defaultPasswordLength = 64
+
+ defaultTLSVersion = "tls12"
+)
+
+func (b *backend) readConfig(ctx context.Context, storage logical.Storage) (*configuration, error) {
+ entry, err := storage.Get(ctx, configStorageKey)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ config := &configuration{&passwordConf{}, &ldaputil.ConfigEntry{}}
+ if err := entry.DecodeJSON(config); err != nil {
+ return nil, err
+ }
+ return config, nil
+}
+
+func (b *backend) pathConfig() *framework.Path {
+ return &framework.Path{
+ Pattern: configPath,
+ Fields: b.configFields(),
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.configUpdateOperation,
+ logical.ReadOperation: b.configReadOperation,
+ logical.DeleteOperation: b.configDeleteOperation,
+ },
+ HelpSynopsis: configHelpSynopsis,
+ HelpDescription: configHelpDescription,
+ }
+}
+
+func (b *backend) configFields() map[string]*framework.FieldSchema {
+ fields := ldaputil.ConfigFields()
+ fields["ttl"] = &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: "In seconds, the default password time-to-live.",
+ }
+ fields["max_ttl"] = &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: "In seconds, the maximum password time-to-live.",
+ }
+ fields["length"] = &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Default: defaultPasswordLength,
+ Description: "The desired length of passwords that Vault generates.",
+ }
+ return fields
+}
+
+func (b *backend) configUpdateOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
+ // Build and validate the ldap conf.
+ activeDirectoryConf, err := ldaputil.NewConfigEntry(fieldData)
+ if err != nil {
+ return nil, err
+ }
+ if err := activeDirectoryConf.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Build the password conf.
+ ttl := fieldData.Get("ttl").(int)
+ maxTTL := fieldData.Get("max_ttl").(int)
+ length := fieldData.Get("length").(int)
+
+ if ttl == 0 {
+ ttl = int(b.System().DefaultLeaseTTL().Seconds())
+ }
+ if maxTTL == 0 {
+ maxTTL = int(b.System().MaxLeaseTTL().Seconds())
+ }
+ if ttl > maxTTL {
+ return nil, errors.New("ttl must be smaller than or equal to max_ttl")
+ }
+ if ttl < 1 {
+ return nil, errors.New("ttl must be positive")
+ }
+ if maxTTL < 1 {
+ return nil, errors.New("max_ttl must be positive")
+ }
+ if length < util.MinimumPasswordLength {
+ return nil, fmt.Errorf("minimum password length is %d for sufficient complexity to be secure, though Vault recommends a higher length", util.MinimumPasswordLength)
+ }
+ passwordConf := &passwordConf{
+ TTL: ttl,
+ MaxTTL: maxTTL,
+ Length: length,
+ }
+
+ config := &configuration{passwordConf, activeDirectoryConf}
+ entry, err := logical.StorageEntryJSON(configStorageKey, config)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(ctx, entry); err != nil {
+ return nil, err
+ }
+
+ // Respond with a 204.
+ return nil, nil
+}
+
+func (b *backend) configReadOperation(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+ config, err := b.readConfig(ctx, req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if config == nil {
+ return nil, nil
+ }
+
+ // NOTE:
+ // "password" is intentionally not returned by this endpoint,
+ // as we lean away from returning sensitive information unless it's absolutely necessary.
+ // Also, we don't return the full ADConf here because not all parameters are used by this engine.
+ configMap := map[string]interface{}{
+ "url": config.ADConf.Url,
+ "starttls": config.ADConf.StartTLS,
+ "insecure_tls": config.ADConf.InsecureTLS,
+ "certificate": config.ADConf.Certificate,
+ "binddn": config.ADConf.BindDN,
+ "userdn": config.ADConf.UserDN,
+ "upndomain": config.ADConf.UPNDomain,
+ "tls_min_version": config.ADConf.TLSMinVersion,
+ "tls_max_version": config.ADConf.TLSMaxVersion,
+ }
+ for k, v := range config.PasswordConf.Map() {
+ configMap[k] = v
+ }
+
+ resp := &logical.Response{
+ Data: configMap,
+ }
+ return resp, nil
+}
+
+func (b *backend) configDeleteOperation(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+ if err := req.Storage.Delete(ctx, configStorageKey); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+const (
+ configHelpSynopsis = `
+Configure the AD server to connect to, along with password options.
+`
+ configHelpDescription = `
+This endpoint allows you to configure the AD server to connect to and its
+configuration options. When you add, update, or delete a config, it takes
+immediate effect on all subsequent actions. It does not apply itself to roles
+or creds added in the past.
+
+The AD URL can use either the "ldap://" or "ldaps://" scheme. In the former
+case, an unencrypted connection will be made with a default port of 389, unless
+the "starttls" parameter is set to true, in which case TLS will be used. In the
+latter case, an SSL connection will be established with a default port of 636.
+
+## A NOTE ON ESCAPING
+
+It is up to the administrator to provide properly escaped DNs. This includes
+the user DN, bind DN for search, and so on.
+
+The only DN escaping performed by this backend is on usernames given at login
+time when they are inserted into the final bind DN, and uses escaping rules
+defined in RFC 4514.
+
+Additionally, Active Directory has escaping rules that differ slightly from the
+RFC; in particular it requires escaping of '#' regardless of position in the DN
+(the RFC only requires it to be escaped when it is the first character), and
+'=', which the RFC indicates can be escaped with a backslash, but does not
+contain in its set of required escapes. If you are using Active Directory and
+these appear in your usernames, please ensure that they are escaped, in
+addition to being properly escaped in your configured DNs.
+
+For reference, see https://www.ietf.org/rfc/rfc4514.txt and
+http://social.technet.microsoft.com/wiki/contents/articles/5312.active-directory-characters-to-escape.aspx
+`
+)
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
new file mode 100644
index 000000000..fbb284e4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go
@@ -0,0 +1,216 @@
+package plugin
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-errors/errors"
+ "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ credPrefix = "creds/"
+ storageKey = "creds"
+
+ // Since Active Directory offers eventual consistency, in testing we found that sometimes
+ // Active Directory returned "password last set" times that were _later_ than our own,
+ // even though ours were captured after synchronously completing a password update operation.
+ //
+ // An example we captured was:
+ // last_vault_rotation 2018-04-18T22:29:57.385454779Z
+ // password_last_set 2018-04-18T22:29:57.3902786Z
+ //
+ // Thus we add a short time buffer when checking whether anyone _else_ updated the AD password
+ // since Vault last rotated it.
+ passwordLastSetBuffer = time.Second
+
+ // Since password TTL can be set to as low as 1 second,
+ // we can't cache passwords for an entire second.
+ credCacheCleanup = time.Second / 3
+ credCacheExpiration = time.Second / 2
+)
+
+// deleteCred fulfills the DeleteWatcher interface in roles.
+// It allows the roleHandler to let us know when a role's been deleted so we can delete its associated creds too.
+func (b *backend) deleteCred(ctx context.Context, storage logical.Storage, roleName string) error {
+ if err := storage.Delete(ctx, storageKey+"/"+roleName); err != nil {
+ return err
+ }
+ b.credCache.Delete(roleName)
+ return nil
+}
+
+func (b *backend) invalidateCred(ctx context.Context, key string) {
+ if strings.HasPrefix(key, credPrefix) {
+ roleName := key[len(credPrefix):]
+ b.credCache.Delete(roleName)
+ }
+}
+
+func (b *backend) pathCreds() *framework.Path {
+ return &framework.Path{
+ Pattern: credPrefix + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.credReadOperation,
+ },
+ HelpSynopsis: credHelpSynopsis,
+ HelpDescription: credHelpDescription,
+ }
+}
+
+func (b *backend) credReadOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
+ cred := make(map[string]interface{})
+
+ roleName := fieldData.Get("name").(string)
+
+ // We act upon quite a few things below that could be racy if not locked:
+ // - Roles. If a new cred is created, the role is updated to include the new LastVaultRotation time,
+ //   affecting role storage (and the role cache, but that's already thread-safe).
+ // - Creds. New creds involve writing to cred storage and the cred cache (also already thread-safe).
+ // Rather than setting read locks of different types, and upgrading them to write locks, let's keep complexity
+ // low and use one simple mutex.
+ b.credLock.Lock()
+ defer b.credLock.Unlock()
+
+ role, err := b.readRole(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ var resp *logical.Response
+ var respErr error
+ var unset time.Time
+
+ switch {
+
+ case role.LastVaultRotation == unset:
+ // We've never managed this cred before.
+ // We need to rotate the password so Vault will know it.
+ resp, respErr = b.generateAndReturnCreds(ctx, req.Storage, roleName, role, cred)
+
+ case role.PasswordLastSet.After(role.LastVaultRotation.Add(passwordLastSetBuffer)):
+ // Someone has manually rotated the password in Active Directory since we last rolled it.
+ // We need to rotate it now so Vault will know it and be able to return it.
+ resp, respErr = b.generateAndReturnCreds(ctx, req.Storage, roleName, role, cred)
+
+ default:
+ // Since we should know the last password, let's retrieve it now so we can return it with the new one.
+ credIfc, found := b.credCache.Get(roleName)
+ if found {
+ cred = credIfc.(map[string]interface{})
+ } else {
+ entry, err := req.Storage.Get(ctx, storageKey+"/"+roleName)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ // If the creds aren't in storage, but roles are and we've created creds before,
+ // this is an unexpected state and something has gone wrong.
+ // Let's be explicit and error about this.
+ return nil, fmt.Errorf("should have the creds for %+v but they're not found", role)
+ }
+ if err := entry.DecodeJSON(&cred); err != nil {
+ return nil, err
+ }
+ b.credCache.SetDefault(roleName, cred)
+ }
+
+ // Is the password too old?
+ // If so, time for a new one!
+ now := time.Now().UTC()
+ shouldBeRolled := role.LastVaultRotation.Add(time.Duration(role.TTL) * time.Second) // already in UTC
+ if now.After(shouldBeRolled) {
+ resp, respErr = b.generateAndReturnCreds(ctx, req.Storage, roleName, role, cred)
+ } else {
+ resp = &logical.Response{
+ Data: cred,
+ }
+ }
+ }
+ if respErr != nil {
+ return nil, respErr
+ }
+ if resp == nil {
+ return nil, nil
+ }
+ return resp, nil
+}
+
+func (b *backend) generateAndReturnCreds(ctx context.Context, storage logical.Storage, roleName string, role *backendRole, previousCred map[string]interface{}) (*logical.Response, error) {
+ engineConf, err := b.readConfig(ctx, storage)
+ if err != nil {
+ return nil, err
+ }
+ if engineConf == nil {
+ return nil, errors.New("the config is currently unset")
+ }
+
+ newPassword, err := util.GeneratePassword(engineConf.PasswordConf.Length)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := b.client.UpdatePassword(engineConf.ADConf, role.ServiceAccountName, newPassword); err != nil {
+ return nil, err
+ }
+
+ // Time recorded is in UTC for easier user comparison to AD's last rotated time, which is set to UTC by Microsoft.
+ role.LastVaultRotation = time.Now().UTC()
+ if err := b.writeRole(ctx, storage, roleName, role); err != nil {
+ return nil, err
+ }
+
+ // Although a service account name is typically my_app@example.com,
+ // the username it uses is just my_app, or everything before the @.
+ // strings.Split always returns at least one element, so instead we guard
+ // against an empty username (e.g. a name that begins with "@").
+ fields := strings.Split(role.ServiceAccountName, "@")
+ if fields[0] == "" {
+ return nil, fmt.Errorf("unable to infer username from service account name: %s", role.ServiceAccountName)
+ }
+ username := fields[0]
+
+ cred := map[string]interface{}{
+ "username": username,
+ "current_password": newPassword,
+ }
+ if previousCred["current_password"] != nil {
+ cred["last_password"] = previousCred["current_password"]
+ }
+
+ // Cache and save the cred.
+ entry, err := logical.StorageEntryJSON(storageKey+"/"+roleName, cred)
+ if err != nil {
+ return nil, err
+ }
+ if err := storage.Put(ctx, entry); err != nil {
+ return nil, err
+ }
+ b.credCache.SetDefault(roleName, cred)
+
+ return &logical.Response{
+ Data: cred,
+ }, nil
+}
+
+const (
+ credHelpSynopsis = `
+Retrieve a role's creds by role name.
+`
+ credHelpDescription = `
+Read creds using a role's name to view the login, current password, and last password.
+`
+)
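
The three-way switch in credReadOperation is the heart of this file. Condensed
into a hypothetical in-package helper (not part of the plugin), the decision
reads:

    // needsRotation mirrors the switch in credReadOperation.
    func needsRotation(role *backendRole, now time.Time) bool {
        var unset time.Time
        switch {
        case role.LastVaultRotation == unset:
            // Vault has never rotated this password, so it doesn't know it yet.
            return true
        case role.PasswordLastSet.After(role.LastVaultRotation.Add(passwordLastSetBuffer)):
            // Someone rotated the password in AD after Vault's last rotation.
            return true
        default:
            // Otherwise rotate only once the password has outlived its TTL.
            return now.After(role.LastVaultRotation.Add(time.Duration(role.TTL) * time.Second))
        }
    }
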
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
new file mode 100644
index 000000000..e57b89857
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_roles.go
@@ -0,0 +1,254 @@
+package plugin
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-errors/errors"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const (
+ rolePath = "roles"
+ rolePrefix = "roles/"
+ roleStorageKey = "roles"
+
+ roleCacheCleanup = time.Second / 2
+ roleCacheExpiration = time.Second
+)
+
+func (b *backend) invalidateRole(ctx context.Context, key string) {
+ if strings.HasPrefix(key, rolePrefix) {
+ roleName := key[len(rolePrefix):]
+ b.roleCache.Delete(roleName)
+ }
+}
+
+func (b *backend) pathListRoles() *framework.Path {
+ return &framework.Path{
+ Pattern: rolePrefix + "?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.roleListOperation,
+ },
+
+ HelpSynopsis: pathListRolesHelpSyn,
+ HelpDescription: pathListRolesHelpDesc,
+ }
+}
+
+func (b *backend) pathRoles() *framework.Path {
+ return &framework.Path{
+ Pattern: rolePrefix + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role",
+ },
+ "service_account_name": {
+ Type: framework.TypeString,
+ Description: "The username/logon name for the service account with which this role will be associated.",
+ },
+ "ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: "In seconds, the default password time-to-live.",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.roleUpdateOperation,
+ logical.ReadOperation: b.roleReadOperation,
+ logical.DeleteOperation: b.roleDeleteOperation,
+ },
+ HelpSynopsis: roleHelpSynopsis,
+ HelpDescription: roleHelpDescription,
+ }
+}
+
+func (b *backend) readRole(ctx context.Context, storage logical.Storage, roleName string) (*backendRole, error) {
+ // If it's cached, return it from there.
+ roleIfc, found := b.roleCache.Get(roleName)
+ if found {
+ return roleIfc.(*backendRole), nil
+ }
+
+ // It's not, read it from storage.
+ entry, err := storage.Get(ctx, roleStorageKey+"/"+roleName)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ role := &backendRole{}
+ if err := entry.DecodeJSON(role); err != nil {
+ return nil, err
+ }
+
+ // Always check, on the fly, when Active Directory shows the password as last set.
+ engineConf, err := b.readConfig(ctx, storage)
+ if err != nil {
+ return nil, err
+ }
+ if engineConf == nil {
+ return nil, errors.New("the config is currently unset")
+ }
+
+ passwordLastSet, err := b.client.GetPasswordLastSet(engineConf.ADConf, role.ServiceAccountName)
+ if err != nil {
+ return nil, err
+ }
+ role.PasswordLastSet = passwordLastSet
+
+ // Cache it.
+ b.roleCache.SetDefault(roleName, role)
+ return role, nil
+}
+
+func (b *backend) writeRole(ctx context.Context, storage logical.Storage, roleName string, role *backendRole) error {
+ entry, err := logical.StorageEntryJSON(roleStorageKey+"/"+roleName, role)
+ if err != nil {
+ return err
+ }
+ if err := storage.Put(ctx, entry); err != nil {
+ return err
+ }
+ b.roleCache.SetDefault(roleName, role)
+ return nil
+}
+
+func (b *backend) roleUpdateOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
+ // Get everything we need to construct the role.
+ roleName := fieldData.Get("name").(string)
+
+ engineConf, err := b.readConfig(ctx, req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if engineConf == nil {
+ return nil, errors.New("the config is currently unset")
+ }
+
+ // Actually construct it.
+ serviceAccountName, err := getServiceAccountName(fieldData)
+ if err != nil {
+ return nil, err
+ }
+
+ // verify service account exists
+ _, err = b.client.Get(engineConf.ADConf, serviceAccountName)
+ if err != nil {
+ return nil, err
+ }
+
+ ttl, err := getValidatedTTL(engineConf.PasswordConf, fieldData)
+ if err != nil {
+ return nil, err
+ }
+ role := &backendRole{
+ ServiceAccountName: serviceAccountName,
+ TTL: ttl,
+ }
+
+ // Was there already a role before that we're now overwriting? If so, let's carry forward the LastVaultRotation.
+ oldRole, err := b.readRole(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if oldRole != nil {
+ role.LastVaultRotation = oldRole.LastVaultRotation
+ }
+
+ // writeRole it to storage and the roleCache.
+ if err := b.writeRole(ctx, req.Storage, roleName, role); err != nil {
+ return nil, err
+ }
+
+ // Return a 204.
+ return nil, nil
+}
+
+func (b *backend) roleReadOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
+ roleName := fieldData.Get("name").(string)
+
+ role, err := b.readRole(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: role.Map(),
+ }, nil
+}
+
+func (b *backend) roleListOperation(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+ keys, err := req.Storage.List(ctx, roleStorageKey+"/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(keys), nil
+}
+
+func (b *backend) roleDeleteOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
+ roleName := fieldData.Get("name").(string)
+
+ if err := req.Storage.Delete(ctx, roleStorageKey+"/"+roleName); err != nil {
+ return nil, err
+ }
+
+ b.roleCache.Delete(roleName)
+
+ if err := b.deleteCred(ctx, req.Storage, roleName); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func getServiceAccountName(fieldData *framework.FieldData) (string, error) {
+ serviceAccountName := fieldData.Get("service_account_name").(string)
+ if serviceAccountName == "" {
+ return "", errors.New("\"service_account_name\" is required")
+ }
+ return serviceAccountName, nil
+}
+
+func getValidatedTTL(passwordConf *passwordConf, fieldData *framework.FieldData) (int, error) {
+ ttl := fieldData.Get("ttl").(int)
+ if ttl == 0 {
+ ttl = passwordConf.TTL
+ }
+ if ttl > passwordConf.MaxTTL {
+ return 0, fmt.Errorf("requested ttl of %d seconds is over the max ttl of %d seconds", ttl, passwordConf.MaxTTL)
+ }
+ if ttl < 0 {
+ return 0, fmt.Errorf("ttl can't be negative")
+ }
+ return ttl, nil
+}
+
+const (
+ roleHelpSynopsis = `
+Manage roles to build links between Vault and Active Directory service accounts.
+`
+ roleHelpDescription = `
+This endpoint allows you to read, write, and delete individual roles that are used for enabling password rotation.
+
+Deleting a role will not disable its current password. It will delete the role's associated creds in Vault.
+`
+
+ pathListRolesHelpSyn = `
+List the name of each role currently stored.
+`
+ pathListRolesHelpDesc = `
+To learn which service accounts are being managed by Vault, list the role names using
+this endpoint. Then read any individual role by name to learn more, like the name of
+the service account it's associated with.
+`
+)
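
For illustration, this is how getValidatedTTL resolves an absent ttl against
the engine-wide password configuration; the FieldData construction is a
test-style, in-package sketch rather than production code:

    fd := &framework.FieldData{
        Raw: map[string]interface{}{}, // the caller supplied no "ttl"
        Schema: map[string]*framework.FieldSchema{
            "ttl": {Type: framework.TypeDurationSecond},
        },
    }
    ttl, err := getValidatedTTL(&passwordConf{TTL: 30, MaxTTL: 60}, fd)
    // err == nil and ttl == 30: a zero or absent ttl falls back to the engine
    // default, while any request above MaxTTL (60 here) would be rejected.
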
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/role.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/role.go
new file mode 100644
index 000000000..8f653d7b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/role.go
@@ -0,0 +1,28 @@
+package plugin
+
+import (
+ "time"
+)
+
+type backendRole struct {
+ ServiceAccountName string `json:"service_account_name"`
+ TTL int `json:"ttl"`
+ LastVaultRotation time.Time `json:"last_vault_rotation"`
+ PasswordLastSet time.Time `json:"password_last_set"`
+}
+
+func (r *backendRole) Map() map[string]interface{} {
+ m := map[string]interface{}{
+ "service_account_name": r.ServiceAccountName,
+ "ttl": r.TTL,
+ }
+
+ var unset time.Time
+ if r.LastVaultRotation != unset {
+ m["last_vault_rotation"] = r.LastVaultRotation
+ }
+ if r.PasswordLastSet != unset {
+ m["password_last_set"] = r.PasswordLastSet
+ }
+ return m
+}
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go
new file mode 100644
index 000000000..a6a4e872d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/passwords.go
@@ -0,0 +1,38 @@
+package util
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/hashicorp/go-uuid"
+)
+
+var (
+ // Per https://en.wikipedia.org/wiki/Password_strength#Guidelines_for_strong_passwords
+ minimumLengthOfComplexString = 8
+
+ PasswordComplexityPrefix = "?@09AZ"
+ MinimumPasswordLength = len(PasswordComplexityPrefix) + minimumLengthOfComplexString
+)
+
+func GeneratePassword(desiredLength int) (string, error) {
+ if desiredLength < MinimumPasswordLength {
+ return "", fmt.Errorf("it's not possible to generate a _secure_ password of length %d, please boost length to %d, though Vault recommends higher", desiredLength, MinimumPasswordLength)
+ }
+
+ b, err := uuid.GenerateRandomBytes(desiredLength)
+ if err != nil {
+ return "", err
+ }
+
+ result := ""
+ // Though the result should immediately be longer than the desiredLength,
+ // do this in a loop to ensure there's absolutely no risk of a panic when slicing it down later.
+ for len(result) <= desiredLength {
+ // Encode to base64 so the random bytes become printable characters.
+ result += base64.StdEncoding.EncodeToString(b)
+ }
+
+ result = PasswordComplexityPrefix + result
+ return result[:desiredLength], nil
+}
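
A quick usage sketch: the result always begins with PasswordComplexityPrefix
and is truncated to exactly the requested length:

    package main

    import (
        "fmt"
        "strings"

        "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
    )

    func main() {
        p, err := util.GeneratePassword(20)
        if err != nil {
            panic(err) // only fails below util.MinimumPasswordLength (14)
        }
        fmt.Println(len(p))                                              // 20
        fmt.Println(strings.HasPrefix(p, util.PasswordComplexityPrefix)) // true
    }
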
diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/secrets_client.go b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/secrets_client.go
new file mode 100644
index 000000000..cc5d9693d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/util/secrets_client.go
@@ -0,0 +1,73 @@
+package util
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client"
+ "github.com/hashicorp/vault/helper/ldaputil"
+)
+
+func NewSecretsClient(logger hclog.Logger) *SecretsClient {
+ return &SecretsClient{adClient: client.NewClient(logger)}
+}
+
+// SecretsClient wraps a *client.Client to expose just the common convenience methods needed by the ad secrets backend.
+type SecretsClient struct {
+ adClient *client.Client
+}
+
+func (c *SecretsClient) Get(conf *ldaputil.ConfigEntry, serviceAccountName string) (*client.Entry, error) {
+ filters := map[*client.Field][]string{
+ client.FieldRegistry.UserPrincipalName: {serviceAccountName},
+ }
+
+ entries, err := c.adClient.Search(conf, filters)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(entries) == 0 {
+ return nil, fmt.Errorf("unable to find service account named %s in active directory, searches are case sensitive", serviceAccountName)
+ }
+ if len(entries) > 1 {
+ return nil, fmt.Errorf("expected one matching service account, but received %s", entries)
+ }
+ return entries[0], nil
+}
+
+func (c *SecretsClient) GetPasswordLastSet(conf *ldaputil.ConfigEntry, serviceAccountName string) (time.Time, error) {
+ entry, err := c.Get(conf, serviceAccountName)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ values, found := entry.Get(client.FieldRegistry.PasswordLastSet)
+ if !found {
+ return time.Time{}, fmt.Errorf("%+v lacks a PasswordLastSet field", entry)
+ }
+
+ if len(values) != 1 {
+ return time.Time{}, fmt.Errorf("expected only one value for PasswordLastSet, but received %s", values)
+ }
+
+ ticks := values[0]
+ if ticks == "0" {
+ // password has never been rolled in Active Directory, only created
+ return time.Time{}, nil
+ }
+
+ t, err := client.ParseTicks(ticks)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return t, nil
+}
+
+func (c *SecretsClient) UpdatePassword(conf *ldaputil.ConfigEntry, serviceAccountName string, newPassword string) error {
+ filters := map[*client.Field][]string{
+ client.FieldRegistry.UserPrincipalName: {serviceAccountName},
+ }
+ return c.adClient.UpdatePassword(conf, filters, newPassword)
+}
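
Tying the helpers together, a hedged sketch of checking a service account's
last rotation; the host, bind DN, and account name are placeholders, and the
ldaputil.ConfigEntry field names follow the config read operation earlier in
this patch:

    package main

    import (
        "fmt"

        hclog "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util"
        "github.com/hashicorp/vault/helper/ldaputil"
    )

    func main() {
        c := util.NewSecretsClient(hclog.Default())
        conf := &ldaputil.ConfigEntry{
            Url:          "ldaps://ad.example.com", // placeholder host
            BindDN:       "cn=vault,ou=Users,dc=example,dc=com",
            BindPassword: "bind-password", // placeholder credential
        }
        t, err := c.GetPasswordLastSet(conf, "my_app@example.com")
        if err != nil {
            panic(err)
        }
        fmt.Println(t) // the zero time if the password was never rotated in AD
    }
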
diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go
new file mode 100644
index 000000000..221f175c0
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/encoding.go
@@ -0,0 +1,335 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package encoding defines an interface for character encodings, such as Shift
+// JIS and Windows 1252, that can convert to and from UTF-8.
+//
+// Encoding implementations are provided in other packages, such as
+// golang.org/x/text/encoding/charmap and
+// golang.org/x/text/encoding/japanese.
+package encoding // import "golang.org/x/text/encoding"
+
+import (
+ "errors"
+ "io"
+ "strconv"
+ "unicode/utf8"
+
+ "golang.org/x/text/encoding/internal/identifier"
+ "golang.org/x/text/transform"
+)
+
+// TODO:
+// - There seems to be some inconsistency in when decoders return errors
+// and when not. Also documentation seems to suggest they shouldn't return
+// errors at all (except for UTF-16).
+// - Encoders seem to rely on or at least benefit from the input being in NFC
+// normal form. Perhaps add an example of how users could prepare their output.
+
+// Encoding is a character set encoding that can be transformed to and from
+// UTF-8.
+type Encoding interface {
+ // NewDecoder returns a Decoder.
+ NewDecoder() *Decoder
+
+ // NewEncoder returns an Encoder.
+ NewEncoder() *Encoder
+}
+
+// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
+//
+// Transforming source bytes that are not of that encoding will not result in an
+// error per se. Each byte that cannot be transcoded will be represented in the
+// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
+type Decoder struct {
+ transform.Transformer
+
+ // This forces external creators of Decoders to use names in struct
+ // initializers, allowing for future extendibility without having to break
+ // code.
+ _ struct{}
+}
+
+// Bytes converts the given encoded bytes to UTF-8. It returns the converted
+// bytes or nil, err if any error occurred.
+func (d *Decoder) Bytes(b []byte) ([]byte, error) {
+ b, _, err := transform.Bytes(d, b)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// String converts the given encoded string to UTF-8. It returns the converted
+// string or "", err if any error occurred.
+func (d *Decoder) String(s string) (string, error) {
+ s, _, err := transform.String(d, s)
+ if err != nil {
+ return "", err
+ }
+ return s, nil
+}
+
+// Reader wraps another Reader to decode its bytes.
+//
+// The Decoder may not be used for any other operation as long as the returned
+// Reader is in use.
+func (d *Decoder) Reader(r io.Reader) io.Reader {
+ return transform.NewReader(r, d)
+}
+
+// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
+//
+// Each rune that cannot be transcoded will result in an error. In this case,
+// the transform will consume all source bytes up to, not including the offending
+// rune. Transforming source bytes that are not valid UTF-8 will be replaced by
+// `\uFFFD`. To return early with an error instead, use transform.Chain to
+// preprocess the data with a UTF8Validator.
+type Encoder struct {
+ transform.Transformer
+
+ // This forces external creators of Encoders to use names in struct
+ // initializers, allowing for future extendibility without having to break
+ // code.
+ _ struct{}
+}
+
+// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
+// any error occurred.
+func (e *Encoder) Bytes(b []byte) ([]byte, error) {
+ b, _, err := transform.Bytes(e, b)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// String converts a string from UTF-8. It returns the converted string or
+// "", err if any error occurred.
+func (e *Encoder) String(s string) (string, error) {
+ s, _, err := transform.String(e, s)
+ if err != nil {
+ return "", err
+ }
+ return s, nil
+}
+
+// Writer wraps another Writer to encode its UTF-8 output.
+//
+// The Encoder may not be used for any other operation as long as the returned
+// Writer is in use.
+func (e *Encoder) Writer(w io.Writer) io.Writer {
+ return transform.NewWriter(w, e)
+}
+
+// ASCIISub is the ASCII substitute character, as recommended by
+// http://unicode.org/reports/tr36/#Text_Comparison
+const ASCIISub = '\x1a'
+
+// Nop is the nop encoding. Its transformed bytes are the same as the source
+// bytes; it does not replace invalid UTF-8 sequences.
+var Nop Encoding = nop{}
+
+type nop struct{}
+
+func (nop) NewDecoder() *Decoder {
+ return &Decoder{Transformer: transform.Nop}
+}
+func (nop) NewEncoder() *Encoder {
+ return &Encoder{Transformer: transform.Nop}
+}
+
+// Replacement is the replacement encoding. Decoding from the replacement
+// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
+// the replacement encoding yields the same as the source bytes except that
+// invalid UTF-8 is converted to '\uFFFD'.
+//
+// It is defined at http://encoding.spec.whatwg.org/#replacement
+var Replacement Encoding = replacement{}
+
+type replacement struct{}
+
+func (replacement) NewDecoder() *Decoder {
+ return &Decoder{Transformer: replacementDecoder{}}
+}
+
+func (replacement) NewEncoder() *Encoder {
+ return &Encoder{Transformer: replacementEncoder{}}
+}
+
+func (replacement) ID() (mib identifier.MIB, other string) {
+ return identifier.Replacement, ""
+}
+
+type replacementDecoder struct{ transform.NopResetter }
+
+func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ if len(dst) < 3 {
+ return 0, 0, transform.ErrShortDst
+ }
+ if atEOF {
+ const fffd = "\ufffd"
+ dst[0] = fffd[0]
+ dst[1] = fffd[1]
+ dst[2] = fffd[2]
+ nDst = 3
+ }
+ return nDst, len(src), nil
+}
+
+type replacementEncoder struct{ transform.NopResetter }
+
+func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ r, size := rune(0), 0
+
+ for ; nSrc < len(src); nSrc += size {
+ r = rune(src[nSrc])
+
+ // Decode a 1-byte rune.
+ if r < utf8.RuneSelf {
+ size = 1
+
+ } else {
+ // Decode a multi-byte rune.
+ r, size = utf8.DecodeRune(src[nSrc:])
+ if size == 1 {
+ // All valid runes of size 1 (those below utf8.RuneSelf) were
+ // handled above. We have invalid UTF-8 or we haven't seen the
+ // full character yet.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ r = '\ufffd'
+ }
+ }
+
+ if nDst+utf8.RuneLen(r) > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += utf8.EncodeRune(dst[nDst:], r)
+ }
+ return nDst, nSrc, err
+}
+
+// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with HTML escape sequences.
+//
+// This wrapper exists to comply with URL and HTML forms requiring a
+// non-terminating legacy encoder. The produced sequences may lead to data
+// loss as they are indistinguishable from legitimate input. To avoid this
+// issue, use UTF-8 encodings whenever possible.
+func HTMLEscapeUnsupported(e *Encoder) *Encoder {
+ return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
+}
+
+// ReplaceUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with an encoding-specific
+// replacement.
+//
+// This wrapper is only provided for backwards compatibility and legacy
+// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
+func ReplaceUnsupported(e *Encoder) *Encoder {
+ return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
+}
+
+type errorHandler struct {
+ *Encoder
+ handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
+}
+
+// TODO: consider making this error public in some form.
+type repertoireError interface {
+ Replacement() byte
+}
+
+func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
+ for err != nil {
+ rerr, ok := err.(repertoireError)
+ if !ok {
+ return nDst, nSrc, err
+ }
+ r, sz := utf8.DecodeRune(src[nSrc:])
+ n, ok := h.handler(dst[nDst:], r, rerr)
+ if !ok {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ err = nil
+ nDst += n
+ if nSrc += sz; nSrc < len(src) {
+ var dn, sn int
+ dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
+ nDst += dn
+ nSrc += sn
+ }
+ }
+ return nDst, nSrc, err
+}
+
+func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
+ buf := [8]byte{}
+ b := strconv.AppendUint(buf[:0], uint64(r), 10)
+ if n = len(b) + len(""); n >= len(dst) {
+ return 0, false
+ }
+ dst[0] = '&'
+ dst[1] = '#'
+ dst[copy(dst[2:], b)+2] = ';'
+ return n, true
+}
+
+func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
+ if len(dst) == 0 {
+ return 0, false
+ }
+ dst[0] = err.Replacement()
+ return 1, true
+}
+
+// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
+var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
+
+// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
+// input byte that is not valid UTF-8.
+var UTF8Validator transform.Transformer = utf8Validator{}
+
+type utf8Validator struct{ transform.NopResetter }
+
+func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ n := len(src)
+ if n > len(dst) {
+ n = len(dst)
+ }
+ for i := 0; i < n; {
+ if c := src[i]; c < utf8.RuneSelf {
+ dst[i] = c
+ i++
+ continue
+ }
+ _, size := utf8.DecodeRune(src[i:])
+ if size == 1 {
+ // All valid runes of size 1 (those below utf8.RuneSelf) were
+ // handled above. We have invalid UTF-8 or we haven't seen the
+ // full character yet.
+ err = ErrInvalidUTF8
+ if !atEOF && !utf8.FullRune(src[i:]) {
+ err = transform.ErrShortSrc
+ }
+ return i, i, err
+ }
+ if i+size > len(dst) {
+ return i, i, transform.ErrShortDst
+ }
+ for ; size > 0; size-- {
+ dst[i] = src[i]
+ i++
+ }
+ }
+ if len(src) > len(dst) {
+ err = transform.ErrShortDst
+ }
+ return n, n, err
+}
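
As the package comment notes, concrete encodings live in companion packages
such as golang.org/x/text/encoding/charmap. A minimal decoding example using
the Decoder.String convenience method defined above:

    package main

    import (
        "fmt"

        "golang.org/x/text/encoding/charmap"
    )

    func main() {
        // 0x93 and 0x94 are the curly quotes in Windows-1252; the decoder
        // converts them to their UTF-8 equivalents.
        s, err := charmap.Windows1252.NewDecoder().String("\x93hi\x94")
        if err != nil {
            panic(err)
        }
        fmt.Println(s) // “hi”
    }
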
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
new file mode 100644
index 000000000..7351b4ef8
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package identifier defines the contract between implementations of Encoding
+// and Index by defining identifiers that uniquely identify standardized coded
+// character sets (CCS) and character encoding schemes (CES), which we will
+// together refer to as encodings, for which Encoding implementations provide
+// converters to and from UTF-8. This package is typically only of concern to
+// implementers of Indexes and Encodings.
+//
+// One part of the identifier is the MIB code, which is defined by IANA and
+// uniquely identifies a CCS or CES. Each code is associated with data that
+// references authorities, official documentation as well as aliases and MIME
+// names.
+//
+// Not all CESs are covered by the IANA registry. The "other" string that is
+// returned by ID can be used to identify other character sets or versions of
+// existing ones.
+//
+// It is recommended that each package that provides a set of Encodings provide
+// the All and Common variables to reference all supported encodings and
+// commonly used subset. This allows Index implementations to include all
+// available encodings without explicitly referencing or knowing about them.
+package identifier
+
+// Note: this package is internal, but could be made public if there is a need
+// for writing third-party Indexes and Encodings.
+
+// References:
+// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
+// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
+// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
+// - http://www.ietf.org/rfc/rfc2978.txt
+// - http://www.unicode.org/reports/tr22/
+// - http://www.w3.org/TR/encoding/
+// - https://encoding.spec.whatwg.org/
+// - https://encoding.spec.whatwg.org/encodings.json
+// - https://tools.ietf.org/html/rfc6657#section-5
+
+// Interface can be implemented by Encodings to define the CCS or CES for which
+// it implements conversions.
+type Interface interface {
+ // ID returns an encoding identifier. Exactly one of the mib and other
+ // values should be non-zero.
+ //
+ // In the usual case it is only necessary to indicate the MIB code. The
+ // other string can be used to specify encodings for which there is no MIB,
+ // such as "x-mac-dingbat".
+ //
+ // The other string may only contain the characters a-z, A-Z, 0-9, - and _.
+ ID() (mib MIB, other string)
+
+ // NOTE: the restrictions on the encoding are to allow extending the syntax
+ // with additional information such as versions, vendors and other variants.
+}
+
+// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
+// some identifiers for some encodings that are not covered by the IANA
+// standard.
+//
+// See http://www.iana.org/assignments/ianacharset-mib.
+type MIB uint16
+
+// These additional MIB types are not defined in IANA. They are added because
+// they are common and defined within the text repo.
+const (
+ // Unofficial marks the start of encodings not registered by IANA.
+ Unofficial MIB = 10000 + iota
+
+ // Replacement is the WhatWG replacement encoding.
+ Replacement
+
+ // XUserDefined is the code for x-user-defined.
+ XUserDefined
+
+ // MacintoshCyrillic is the code for x-mac-cyrillic.
+ MacintoshCyrillic
+)
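
Since not every character set has an IANA MIB, an Encoding can identify itself
through the "other" string instead. A sketch of the contract (the type name is
hypothetical, and because this package is internal it only compiles inside the
x/text module):

    // myEncoding has no registered MIB, so it reports only the "other" string.
    type myEncoding struct{}

    func (myEncoding) ID() (mib identifier.MIB, other string) {
        return 0, "x-my-encoding" // exactly one of the two values is non-zero
    }
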
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
new file mode 100644
index 000000000..768842b0a
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
@@ -0,0 +1,1621 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package identifier
+
+const (
+ // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII).
+ //
+ // ANSI X3.4-1986
+ // Reference: RFC2046
+ ASCII MIB = 3
+
+ // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin1 MIB = 4
+
+ // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin2 MIB = 5
+
+ // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin3 MIB = 6
+
+ // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin4 MIB = 7
+
+ // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatinCyrillic MIB = 8
+
+ // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatinArabic MIB = 9
+
+ // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1947
+ // Reference: RFC1345
+ ISOLatinGreek MIB = 10
+
+ // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatinHebrew MIB = 11
+
+ // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin5 MIB = 12
+
+ // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOLatin6 MIB = 13
+
+ // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add.
+ //
+ // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISOTextComm MIB = 14
+
+ // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201.
+ //
+ // JIS X 0201-1976. One byte only, this is equivalent to
+ // JIS/Roman (similar to ASCII) plus eight-bit half-width
+ // Katakana
+ // Reference: RFC1345
+ HalfWidthKatakana MIB = 15
+
+ // JISEncoding is the MIB identifier with IANA name JIS_Encoding.
+ //
+ // JIS X 0202-1991. Uses ISO 2022 escape sequences to
+ // shift code sets as documented in JIS X 0202-1991.
+ JISEncoding MIB = 16
+
+ // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS).
+ //
+ // This charset is an extension of csHalfWidthKatakana by
+ // adding graphic characters in JIS X 0208. The CCS's are
+ // JIS X0201:1997 and JIS X0208:1997. The
+ // complete definition is shown in Appendix 1 of JIS
+ // X0208:1997.
+ // This charset can be used for the top-level media type "text".
+ ShiftJIS MIB = 17
+
+ // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP).
+ //
+ // Standardized by OSF, UNIX International, and UNIX Systems
+ // Laboratories Pacific. Uses ISO 2022 rules to select
+ // code set 0: US-ASCII (a single 7-bit byte set)
+ // code set 1: JIS X0208-1990 (a double 8-bit byte set)
+ // restricted to A0-FF in both bytes
+ // code set 2: Half Width Katakana (a single 7-bit byte set)
+ // requiring SS2 as the character prefix
+ // code set 3: JIS X0212-1990 (a double 7-bit byte set)
+ // restricted to A0-FF in both bytes
+ // requiring SS3 as the character prefix
+ EUCPkdFmtJapanese MIB = 18
+
+ // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese.
+ //
+ // Used in Japan. Each character is 2 octets.
+ // code set 0: US-ASCII (a single 7-bit byte set)
+ // 1st byte = 00
+ // 2nd byte = 20-7E
+ // code set 1: JIS X0208-1990 (a double 7-bit byte set)
+ // restricted to A0-FF in both bytes
+ // code set 2: Half Width Katakana (a single 7-bit byte set)
+ // 1st byte = 00
+ // 2nd byte = A0-FF
+ // code set 3: JIS X0212-1990 (a double 7-bit byte set)
+ // restricted to A0-FF in
+ // the first byte
+ // and 21-7E in the second byte
+ EUCFixWidJapanese MIB = 19
+
+ // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO4UnitedKingdom MIB = 20
+
+ // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO11SwedishForNames MIB = 21
+
+ // ISO15Italian is the MIB identifier with IANA name IT.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO15Italian MIB = 22
+
+ // ISO17Spanish is the MIB identifier with IANA name ES.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO17Spanish MIB = 23
+
+ // ISO21German is the MIB identifier with IANA name DIN_66003.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO21German MIB = 24
+
+ // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO60Norwegian1 MIB = 25
+
+ // ISO69French is the MIB identifier with IANA name NF_Z_62-010.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO69French MIB = 26
+
+ // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1.
+ //
+ // Universal Transfer Format (1), this is the multibyte
+ // encoding, that subsets ASCII-7. It does not have byte
+ // ordering issues.
+ ISO10646UTF1 MIB = 27
+
+ // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO646basic1983 MIB = 28
+
+ // INVARIANT is the MIB identifier with IANA name INVARIANT.
+ //
+ // Reference: RFC1345
+ INVARIANT MIB = 29
+
+ // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO2IntlRefVersion MIB = 30
+
+ // NATSSEFI is the MIB identifier with IANA name NATS-SEFI.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ NATSSEFI MIB = 31
+
+ // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ NATSSEFIADD MIB = 32
+
+ // NATSDANO is the MIB identifier with IANA name NATS-DANO.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ NATSDANO MIB = 33
+
+ // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ NATSDANOADD MIB = 34
+
+ // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO10Swedish MIB = 35
+
+ // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ KSC56011987 MIB = 36
+
+ // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR).
+ //
+ // rfc1557 (see also KS_C_5601-1987)
+ // Reference: RFC1557
+ ISO2022KR MIB = 37
+
+ // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR).
+ //
+ // rfc1557 (see also KS_C_5861-1992)
+ // Reference: RFC1557
+ EUCKR MIB = 38
+
+ // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP).
+ //
+ // rfc1468 (see also rfc2237 )
+ // Reference: RFC1468
+ ISO2022JP MIB = 39
+
+ // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2).
+ //
+ // rfc1554
+ // Reference: RFC1554
+ ISO2022JP2 MIB = 40
+
+ // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO13JISC6220jp MIB = 41
+
+ // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO14JISC6220ro MIB = 42
+
+ // ISO16Portuguese is the MIB identifier with IANA name PT.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO16Portuguese MIB = 43
+
+ // ISO18Greek7Old is the MIB identifier with IANA name greek7-old.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO18Greek7Old MIB = 44
+
+ // ISO19LatinGreek is the MIB identifier with IANA name latin-greek.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO19LatinGreek MIB = 45
+
+ // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973).
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO25French MIB = 46
+
+ // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO27LatinGreek1 MIB = 47
+
+ // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO5427Cyrillic MIB = 48
+
+ // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO42JISC62261978 MIB = 49
+
+ // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO47BSViewdata MIB = 50
+
+ // ISO49INIS is the MIB identifier with IANA name INIS.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO49INIS MIB = 51
+
+ // ISO50INIS8 is the MIB identifier with IANA name INIS-8.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO50INIS8 MIB = 52
+
+ // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO51INISCyrillic MIB = 53
+
+ // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO54271981 MIB = 54
+
+ // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO5428Greek MIB = 55
+
+ // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO57GB1988 MIB = 56
+
+ // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO58GB231280 MIB = 57
+
+ // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO61Norwegian2 MIB = 58
+
+ // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO70VideotexSupp1 MIB = 59
+
+ // ISO84Portuguese2 is the MIB identifier with IANA name PT2.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO84Portuguese2 MIB = 60
+
+ // ISO85Spanish2 is the MIB identifier with IANA name ES2.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO85Spanish2 MIB = 61
+
+ // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO86Hungarian MIB = 62
+
+ // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO87JISX0208 MIB = 63
+
+ // ISO88Greek7 is the MIB identifier with IANA name greek7.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO88Greek7 MIB = 64
+
+ // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO89ASMO449 MIB = 65
+
+ // ISO90 is the MIB identifier with IANA name iso-ir-90.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO90 MIB = 66
+
+ // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO91JISC62291984a MIB = 67
+
+ // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO92JISC62991984b MIB = 68
+
+ // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO93JIS62291984badd MIB = 69
+
+ // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO94JIS62291984hand MIB = 70
+
+ // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO95JIS62291984handadd MIB = 71
+
+ // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO96JISC62291984kana MIB = 72
+
+ // ISO2033 is the MIB identifier with IANA name ISO_2033-1983.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO2033 MIB = 73
+
+ // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO99NAPLPS MIB = 74
+
+ // ISO102T617bit is the MIB identifier with IANA name T.61-7bit.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO102T617bit MIB = 75
+
+ // ISO103T618bit is the MIB identifier with IANA name T.61-8bit.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO103T618bit MIB = 76
+
+ // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic.
+ //
+ // ISO registry (formerly ECMA registry)
+ ISO111ECMACyrillic MIB = 77
+
+ // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO121Canadian1 MIB = 78
+
+ // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO122Canadian2 MIB = 79
+
+ // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO123CSAZ24341985gr MIB = 80
+
+ // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E).
+ //
+ // rfc1556
+ // Reference: RFC1556
+ ISO88596E MIB = 81
+
+ // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I).
+ //
+ // rfc1556
+ // Reference: RFC1556
+ ISO88596I MIB = 82
+
+ // ISO128T101G2 is the MIB identifier with IANA name T.101-G2.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO128T101G2 MIB = 83
+
+ // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E).
+ //
+ // rfc1556
+ // Reference: RFC1556
+ ISO88598E MIB = 84
+
+ // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I).
+ //
+ // rfc1556
+ // Reference: RFC1556
+ ISO88598I MIB = 85
+
+ // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO139CSN369103 MIB = 86
+
+ // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO141JUSIB1002 MIB = 87
+
+ // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO143IECP271 MIB = 88
+
+ // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO146Serbian MIB = 89
+
+ // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO147Macedonian MIB = 90
+
+ // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO150GreekCCITT MIB = 91
+
+ // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO151Cuba MIB = 92
+
+ // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO6937Add MIB = 93
+
+ // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO153GOST1976874 MIB = 94
+
+ // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO8859Supp MIB = 95
+
+ // ISO10367Box is the MIB identifier with IANA name ISO_10367-box.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO10367Box MIB = 96
+
+ // ISO158Lap is the MIB identifier with IANA name latin-lap.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO158Lap MIB = 97
+
+ // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990.
+ //
+ // ISO-IR: International Register of Escape Sequences
+ // Note: The current registration authority is IPSJ/ITSCJ, Japan.
+ // Reference: RFC1345
+ ISO159JISX02121990 MIB = 98
+
+ // ISO646Danish is the MIB identifier with IANA name DS_2089.
+ //
+ // Danish Standard, DS 2089, February 1974
+ // Reference: RFC1345
+ ISO646Danish MIB = 99
+
+ // USDK is the MIB identifier with IANA name us-dk.
+ //
+ // Reference: RFC1345
+ USDK MIB = 100
+
+ // DKUS is the MIB identifier with IANA name dk-us.
+ //
+ // Reference: RFC1345
+ DKUS MIB = 101
+
+ // KSC5636 is the MIB identifier with IANA name KSC5636.
+ //
+ // Reference: RFC1345
+ KSC5636 MIB = 102
+
+ // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7.
+ //
+ // rfc1642
+ // Reference: RFC1642
+ Unicode11UTF7 MIB = 103
+
+ // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN.
+ //
+ // rfc1922
+ // Reference: RFC1922
+ ISO2022CN MIB = 104
+
+ // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT.
+ //
+ // rfc1922
+ // Reference: RFC1922
+ ISO2022CNEXT MIB = 105
+
+ // UTF8 is the MIB identifier with IANA name UTF-8.
+ //
+ // rfc3629
+ // Reference: RFC3629
+ UTF8 MIB = 106
+
+ // ISO885913 is the MIB identifier with IANA name ISO-8859-13.
+ //
+ // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13
+ ISO885913 MIB = 109
+
+ // ISO885914 is the MIB identifier with IANA name ISO-8859-14.
+ //
+ // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14
+ ISO885914 MIB = 110
+
+ // ISO885915 is the MIB identifier with IANA name ISO-8859-15.
+ //
+ // ISO
+ // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15
+ ISO885915 MIB = 111
+
+ // ISO885916 is the MIB identifier with IANA name ISO-8859-16.
+ //
+ // ISO
+ ISO885916 MIB = 112
+
+ // GBK is the MIB identifier with IANA name GBK.
+ //
+ // Chinese IT Standardization Technical Committee
+ // Please see: http://www.iana.org/assignments/charset-reg/GBK
+ GBK MIB = 113
+
+ // GB18030 is the MIB identifier with IANA name GB18030.
+ //
+ // Chinese IT Standardization Technical Committee
+ // Please see: http://www.iana.org/assignments/charset-reg/GB18030
+ GB18030 MIB = 114
+
+ // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15.
+ //
+ // Fujitsu-Siemens standard mainframe EBCDIC encoding
+ // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15
+ OSDEBCDICDF0415 MIB = 115
+
+ // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV.
+ //
+ // Fujitsu-Siemens standard mainframe EBCDIC encoding
+ // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV
+ OSDEBCDICDF03IRV MIB = 116
+
+ // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1.
+ //
+ // Fujitsu-Siemens standard mainframe EBCDIC encoding
+ // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1
+ OSDEBCDICDF041 MIB = 117
+
+ // ISO115481 is the MIB identifier with IANA name ISO-11548-1.
+ //
+ // See http://www.iana.org/assignments/charset-reg/ISO-11548-1
+ ISO115481 MIB = 118
+
+ // KZ1048 is the MIB identifier with IANA name KZ-1048.
+ //
+ // See http://www.iana.org/assignments/charset-reg/KZ-1048
+ KZ1048 MIB = 119
+
+ // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2.
+ //
+ // the 2-octet Basic Multilingual Plane, aka Unicode
+ // this needs to specify network byte order: the standard
+ // does not specify (it is a 16-bit integer space)
+ Unicode MIB = 1000
+
+ // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4.
+ //
+ // the full code space (same comment about byte order;
+ // these are 31-bit numbers).
+ UCS4 MIB = 1001
+
+ // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic.
+ //
+ // ASCII subset of Unicode. Basic Latin = collection 1
+ // See ISO 10646, Appendix A
+ UnicodeASCII MIB = 1002
+
+ // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1.
+ //
+ // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1
+ // Supplement = collections 1 and 2. See ISO 10646,
+ // Appendix A. See rfc1815.
+ UnicodeLatin1 MIB = 1003
+
+ // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1.
+ //
+ // ISO 10646 Japanese, see rfc1815.
+ UnicodeJapanese MIB = 1004
+
+ // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261.
+ //
+ // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261
+ UnicodeIBM1261 MIB = 1005
+
+ // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268.
+ //
+ // IBM Latin-4 Extended Presentation Set, GCSGID: 1268
+ UnicodeIBM1268 MIB = 1006
+
+ // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276.
+ //
+ // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276
+ UnicodeIBM1276 MIB = 1007
+
+ // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264.
+ //
+ // IBM Arabic Presentation Set, GCSGID: 1264
+ UnicodeIBM1264 MIB = 1008
+
+ // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265.
+ //
+ // IBM Hebrew Presentation Set, GCSGID: 1265
+ UnicodeIBM1265 MIB = 1009
+
+ // Unicode11 is the MIB identifier with IANA name UNICODE-1-1.
+ //
+ // rfc1641
+ // Reference: RFC1641
+ Unicode11 MIB = 1010
+
+ // SCSU is the MIB identifier with IANA name SCSU.
+ //
+ // SCSU See http://www.iana.org/assignments/charset-reg/SCSU
+ SCSU MIB = 1011
+
+ // UTF7 is the MIB identifier with IANA name UTF-7.
+ //
+ // rfc2152
+ // Reference: RFC2152
+ UTF7 MIB = 1012
+
+ // UTF16BE is the MIB identifier with IANA name UTF-16BE.
+ //
+ // rfc2781
+ // Reference: RFC2781
+ UTF16BE MIB = 1013
+
+ // UTF16LE is the MIB identifier with IANA name UTF-16LE.
+ //
+ // rfc2781
+ // Reference: RFC2781
+ UTF16LE MIB = 1014
+
+ // UTF16 is the MIB identifier with IANA name UTF-16.
+ //
+ // rfc2781
+ // Reference: RFC2781
+ UTF16 MIB = 1015
+
+ // CESU8 is the MIB identifier with IANA name CESU-8.
+ //
+ // http://www.unicode.org/unicode/reports/tr26
+ CESU8 MIB = 1016
+
+ // UTF32 is the MIB identifier with IANA name UTF-32.
+ //
+ // http://www.unicode.org/unicode/reports/tr19/
+ UTF32 MIB = 1017
+
+ // UTF32BE is the MIB identifier with IANA name UTF-32BE.
+ //
+ // http://www.unicode.org/unicode/reports/tr19/
+ UTF32BE MIB = 1018
+
+ // UTF32LE is the MIB identifier with IANA name UTF-32LE.
+ //
+ // http://www.unicode.org/unicode/reports/tr19/
+ UTF32LE MIB = 1019
+
+ // BOCU1 is the MIB identifier with IANA name BOCU-1.
+ //
+ // http://www.unicode.org/notes/tn6/
+ BOCU1 MIB = 1020
+
+ // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1.
+ //
+ // Extended ISO 8859-1 Latin-1 for Windows 3.0.
+ // PCL Symbol Set id: 9U
+ Windows30Latin1 MIB = 2000
+
+ // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1.
+ //
+ // Extended ISO 8859-1 Latin-1 for Windows 3.1.
+ // PCL Symbol Set id: 19U
+ Windows31Latin1 MIB = 2001
+
+ // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2.
+ //
+ // Extended ISO 8859-2. Latin-2 for Windows 3.1.
+ // PCL Symbol Set id: 9E
+ Windows31Latin2 MIB = 2002
+
+ // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5.
+ //
+ // Extended ISO 8859-9. Latin-5 for Windows 3.1
+ // PCL Symbol Set id: 5T
+ Windows31Latin5 MIB = 2003
+
+ // HPRoman8 is the MIB identifier with IANA name hp-roman8.
+ //
+ // LaserJet IIP Printer User's Manual,
+ // HP part no 33471-90901, Hewlett-Packard, June 1989.
+ // Reference: RFC1345
+ HPRoman8 MIB = 2004
+
+ // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding.
+ //
+ // PostScript Language Reference Manual
+ // PCL Symbol Set id: 10J
+ AdobeStandardEncoding MIB = 2005
+
+ // VenturaUS is the MIB identifier with IANA name Ventura-US.
+ //
+ // Ventura US. ASCII plus characters typically used in
+ // publishing, like pilcrow, copyright, registered, trade mark,
+ // section, dagger, and double dagger in the range A0 (hex)
+ // to FF (hex).
+ // PCL Symbol Set id: 14J
+ VenturaUS MIB = 2006
+
+ // VenturaInternational is the MIB identifier with IANA name Ventura-International.
+ //
+ // Ventura International. ASCII plus coded characters similar
+ // to Roman8.
+ // PCL Symbol Set id: 13J
+ VenturaInternational MIB = 2007
+
+ // DECMCS is the MIB identifier with IANA name DEC-MCS.
+ //
+ // VAX/VMS User's Manual,
+ // Order Number: AI-Y517A-TE, April 1986.
+ // Reference: RFC1345
+ DECMCS MIB = 2008
+
+ // PC850Multilingual is the MIB identifier with IANA name IBM850.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ PC850Multilingual MIB = 2009
+
+ // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian.
+ //
+ // PC Danish Norwegian
+ // 8-bit PC set for Danish Norwegian
+ // PCL Symbol Set id: 11U
+ PC8DanishNorwegian MIB = 2012
+
+ // PC862LatinHebrew is the MIB identifier with IANA name IBM862.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ PC862LatinHebrew MIB = 2013
+
+ // PC8Turkish is the MIB identifier with IANA name PC8-Turkish.
+ //
+ // PC Latin Turkish. PCL Symbol Set id: 9T
+ PC8Turkish MIB = 2014
+
+ // IBMSymbols is the MIB identifier with IANA name IBM-Symbols.
+ //
+ // Presentation Set, CPGID: 259
+ IBMSymbols MIB = 2015
+
+ // IBMThai is the MIB identifier with IANA name IBM-Thai.
+ //
+ // Presentation Set, CPGID: 838
+ IBMThai MIB = 2016
+
+ // HPLegal is the MIB identifier with IANA name HP-Legal.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 1U
+ HPLegal MIB = 2017
+
+ // HPPiFont is the MIB identifier with IANA name HP-Pi-font.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 15U
+ HPPiFont MIB = 2018
+
+ // HPMath8 is the MIB identifier with IANA name HP-Math8.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 8M
+ HPMath8 MIB = 2019
+
+ // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding.
+ //
+ // PostScript Language Reference Manual
+ // PCL Symbol Set id: 5M
+ HPPSMath MIB = 2020
+
+ // HPDesktop is the MIB identifier with IANA name HP-DeskTop.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 7J
+ HPDesktop MIB = 2021
+
+ // VenturaMath is the MIB identifier with IANA name Ventura-Math.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 6M
+ VenturaMath MIB = 2022
+
+ // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing.
+ //
+ // PCL 5 Comparison Guide, Hewlett-Packard,
+ // HP part number 5961-0510, October 1992
+ // PCL Symbol Set id: 6J
+ MicrosoftPublishing MIB = 2023
+
+ // Windows31J is the MIB identifier with IANA name Windows-31J.
+ //
+ // Windows Japanese. A further extension of Shift_JIS
+ // to include NEC special characters (Row 13), NEC
+ // selection of IBM extensions (Rows 89 to 92), and IBM
+ // extensions (Rows 115 to 119). The CCS's are
+ // JIS X0201:1997, JIS X0208:1997, and these extensions.
+ // This charset can be used for the top-level media type "text",
+ // but it is of limited or specialized use (see rfc2278).
+ // PCL Symbol Set id: 19K
+ Windows31J MIB = 2024
+
+ // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312).
+ //
+ // Chinese for People's Republic of China (PRC) mixed one byte,
+ // two byte set:
+ // 20-7E = one byte ASCII
+ // A1-FE = two byte PRC Kanji
+ // See GB 2312-80
+ // PCL Symbol Set Id: 18C
+ GB2312 MIB = 2025
+
+ // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5).
+ //
+ // Chinese for Taiwan Multi-byte set.
+ // PCL Symbol Set Id: 18T
+ Big5 MIB = 2026
+
+ // Macintosh is the MIB identifier with IANA name macintosh.
+ //
+ // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991
+ // Reference: RFC1345
+ Macintosh MIB = 2027
+
+ // IBM037 is the MIB identifier with IANA name IBM037.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM037 MIB = 2028
+
+ // IBM038 is the MIB identifier with IANA name IBM038.
+ //
+ // IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+ // Reference: RFC1345
+ IBM038 MIB = 2029
+
+ // IBM273 is the MIB identifier with IANA name IBM273.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM273 MIB = 2030
+
+ // IBM274 is the MIB identifier with IANA name IBM274.
+ //
+ // IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+ // Reference: RFC1345
+ IBM274 MIB = 2031
+
+ // IBM275 is the MIB identifier with IANA name IBM275.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM275 MIB = 2032
+
+ // IBM277 is the MIB identifier with IANA name IBM277.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM277 MIB = 2033
+
+ // IBM278 is the MIB identifier with IANA name IBM278.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM278 MIB = 2034
+
+ // IBM280 is the MIB identifier with IANA name IBM280.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM280 MIB = 2035
+
+ // IBM281 is the MIB identifier with IANA name IBM281.
+ //
+ // IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+ // Reference: RFC1345
+ IBM281 MIB = 2036
+
+ // IBM284 is the MIB identifier with IANA name IBM284.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM284 MIB = 2037
+
+ // IBM285 is the MIB identifier with IANA name IBM285.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM285 MIB = 2038
+
+ // IBM290 is the MIB identifier with IANA name IBM290.
+ //
+ // IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+ // Reference: RFC1345
+ IBM290 MIB = 2039
+
+ // IBM297 is the MIB identifier with IANA name IBM297.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM297 MIB = 2040
+
+ // IBM420 is the MIB identifier with IANA name IBM420.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990,
+ // IBM NLS RM p 11-11
+ // Reference: RFC1345
+ IBM420 MIB = 2041
+
+ // IBM423 is the MIB identifier with IANA name IBM423.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM423 MIB = 2042
+
+ // IBM424 is the MIB identifier with IANA name IBM424.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM424 MIB = 2043
+
+ // PC8CodePage437 is the MIB identifier with IANA name IBM437.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ PC8CodePage437 MIB = 2011
+
+ // IBM500 is the MIB identifier with IANA name IBM500.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM500 MIB = 2044
+
+ // IBM851 is the MIB identifier with IANA name IBM851.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM851 MIB = 2045
+
+ // PCp852 is the MIB identifier with IANA name IBM852.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ PCp852 MIB = 2010
+
+ // IBM855 is the MIB identifier with IANA name IBM855.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM855 MIB = 2046
+
+ // IBM857 is the MIB identifier with IANA name IBM857.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM857 MIB = 2047
+
+ // IBM860 is the MIB identifier with IANA name IBM860.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM860 MIB = 2048
+
+ // IBM861 is the MIB identifier with IANA name IBM861.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM861 MIB = 2049
+
+ // IBM863 is the MIB identifier with IANA name IBM863.
+ //
+ // IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+ // Reference: RFC1345
+ IBM863 MIB = 2050
+
+ // IBM864 is the MIB identifier with IANA name IBM864.
+ //
+ // IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+ // Reference: RFC1345
+ IBM864 MIB = 2051
+
+ // IBM865 is the MIB identifier with IANA name IBM865.
+ //
+ // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987)
+ // Reference: RFC1345
+ IBM865 MIB = 2052
+
+ // IBM868 is the MIB identifier with IANA name IBM868.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM868 MIB = 2053
+
+ // IBM869 is the MIB identifier with IANA name IBM869.
+ //
+ // IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+ // Reference: RFC1345
+ IBM869 MIB = 2054
+
+ // IBM870 is the MIB identifier with IANA name IBM870.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM870 MIB = 2055
+
+ // IBM871 is the MIB identifier with IANA name IBM871.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM871 MIB = 2056
+
+ // IBM880 is the MIB identifier with IANA name IBM880.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM880 MIB = 2057
+
+ // IBM891 is the MIB identifier with IANA name IBM891.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM891 MIB = 2058
+
+ // IBM903 is the MIB identifier with IANA name IBM903.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM903 MIB = 2059
+
+ // IBBM904 is the MIB identifier with IANA name IBM904.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBBM904 MIB = 2060
+
+ // IBM905 is the MIB identifier with IANA name IBM905.
+ //
+ // IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+ // Reference: RFC1345
+ IBM905 MIB = 2061
+
+ // IBM918 is the MIB identifier with IANA name IBM918.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM918 MIB = 2062
+
+ // IBM1026 is the MIB identifier with IANA name IBM1026.
+ //
+ // IBM NLS RM Vol2 SE09-8002-01, March 1990
+ // Reference: RFC1345
+ IBM1026 MIB = 2063
+
+ // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ IBMEBCDICATDE MIB = 2064
+
+ // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICATDEA MIB = 2065
+
+ // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICCAFR MIB = 2066
+
+ // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICDKNO MIB = 2067
+
+ // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICDKNOA MIB = 2068
+
+ // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICFISE MIB = 2069
+
+ // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICFISEA MIB = 2070
+
+ // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICFR MIB = 2071
+
+ // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICIT MIB = 2072
+
+ // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICPT MIB = 2073
+
+ // EBCDICES is the MIB identifier with IANA name EBCDIC-ES.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICES MIB = 2074
+
+ // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICESA MIB = 2075
+
+ // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICESS MIB = 2076
+
+ // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICUK MIB = 2077
+
+ // EBCDICUS is the MIB identifier with IANA name EBCDIC-US.
+ //
+ // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+ // Reference: RFC1345
+ EBCDICUS MIB = 2078
+
+ // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT.
+ //
+ // Reference: RFC1428
+ Unknown8BiT MIB = 2079
+
+ // Mnemonic is the MIB identifier with IANA name MNEMONIC.
+ //
+ // rfc1345, also known as "mnemonic+ascii+38"
+ // Reference: RFC1345
+ Mnemonic MIB = 2080
+
+ // Mnem is the MIB identifier with IANA name MNEM.
+ //
+ // rfc1345, also known as "mnemonic+ascii+8200"
+ // Reference: RFC1345
+ Mnem MIB = 2081
+
+ // VISCII is the MIB identifier with IANA name VISCII.
+ //
+ // rfc1456
+ // Reference: RFC1456
+ VISCII MIB = 2082
+
+ // VIQR is the MIB identifier with IANA name VIQR.
+ //
+ // rfc1456
+ // Reference: RFC1456
+ VIQR MIB = 2083
+
+ // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R).
+ //
+ // rfc1489, based on GOST-19768-74, ISO-6937/8,
+ // INIS-Cyrillic, ISO-5427.
+ // Reference: RFC1489
+ KOI8R MIB = 2084
+
+ // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312.
+ //
+ // rfc1842, rfc1843
+ HZGB2312 MIB = 2085
+
+ // IBM866 is the MIB identifier with IANA name IBM866.
+ //
+ // IBM NLDG Volume 2 (SE09-8002-03) August 1994
+ IBM866 MIB = 2086
+
+ // PC775Baltic is the MIB identifier with IANA name IBM775.
+ //
+ // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996
+ PC775Baltic MIB = 2087
+
+ // KOI8U is the MIB identifier with IANA name KOI8-U.
+ //
+ // rfc2319
+ // Reference: RFC2319
+ KOI8U MIB = 2088
+
+ // IBM00858 is the MIB identifier with IANA name IBM00858.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM00858
+ IBM00858 MIB = 2089
+
+ // IBM00924 is the MIB identifier with IANA name IBM00924.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM00924
+ IBM00924 MIB = 2090
+
+ // IBM01140 is the MIB identifier with IANA name IBM01140.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01140
+ IBM01140 MIB = 2091
+
+ // IBM01141 is the MIB identifier with IANA name IBM01141.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01141
+ IBM01141 MIB = 2092
+
+ // IBM01142 is the MIB identifier with IANA name IBM01142.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01142
+ IBM01142 MIB = 2093
+
+ // IBM01143 is the MIB identifier with IANA name IBM01143.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01143
+ IBM01143 MIB = 2094
+
+ // IBM01144 is the MIB identifier with IANA name IBM01144.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01144
+ IBM01144 MIB = 2095
+
+ // IBM01145 is the MIB identifier with IANA name IBM01145.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01145
+ IBM01145 MIB = 2096
+
+ // IBM01146 is the MIB identifier with IANA name IBM01146.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01146
+ IBM01146 MIB = 2097
+
+ // IBM01147 is the MIB identifier with IANA name IBM01147.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01147
+ IBM01147 MIB = 2098
+
+ // IBM01148 is the MIB identifier with IANA name IBM01148.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01148
+ IBM01148 MIB = 2099
+
+ // IBM01149 is the MIB identifier with IANA name IBM01149.
+ //
+ // IBM See http://www.iana.org/assignments/charset-reg/IBM01149
+ IBM01149 MIB = 2100
+
+ // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS.
+ //
+ // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS
+ Big5HKSCS MIB = 2101
+
+ // IBM1047 is the MIB identifier with IANA name IBM1047.
+ //
+ // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf
+ IBM1047 MIB = 2102
+
+ // PTCP154 is the MIB identifier with IANA name PTCP154.
+ //
+ // See http://www.iana.org/assignments/charset-reg/PTCP154
+ PTCP154 MIB = 2103
+
+ // Amiga1251 is the MIB identifier with IANA name Amiga-1251.
+ //
+ // See http://www.amiga.ultranet.ru/Amiga-1251.html
+ Amiga1251 MIB = 2104
+
+ // KOI7switched is the MIB identifier with IANA name KOI7-switched.
+ //
+ // See http://www.iana.org/assignments/charset-reg/KOI7-switched
+ KOI7switched MIB = 2105
+
+ // BRF is the MIB identifier with IANA name BRF.
+ //
+ // See http://www.iana.org/assignments/charset-reg/BRF
+ BRF MIB = 2106
+
+ // TSCII is the MIB identifier with IANA name TSCII.
+ //
+ // See http://www.iana.org/assignments/charset-reg/TSCII
+ TSCII MIB = 2107
+
+ // CP51932 is the MIB identifier with IANA name CP51932.
+ //
+ // See http://www.iana.org/assignments/charset-reg/CP51932
+ CP51932 MIB = 2108
+
+ // Windows874 is the MIB identifier with IANA name windows-874.
+ //
+ // See http://www.iana.org/assignments/charset-reg/windows-874
+ Windows874 MIB = 2109
+
+ // Windows1250 is the MIB identifier with IANA name windows-1250.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250
+ Windows1250 MIB = 2250
+
+ // Windows1251 is the MIB identifier with IANA name windows-1251.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251
+ Windows1251 MIB = 2251
+
+ // Windows1252 is the MIB identifier with IANA name windows-1252.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252
+ Windows1252 MIB = 2252
+
+ // Windows1253 is the MIB identifier with IANA name windows-1253.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253
+ Windows1253 MIB = 2253
+
+ // Windows1254 is the MIB identifier with IANA name windows-1254.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254
+ Windows1254 MIB = 2254
+
+ // Windows1255 is the MIB identifier with IANA name windows-1255.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255
+ Windows1255 MIB = 2255
+
+ // Windows1256 is the MIB identifier with IANA name windows-1256.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256
+ Windows1256 MIB = 2256
+
+ // Windows1257 is the MIB identifier with IANA name windows-1257.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257
+ Windows1257 MIB = 2257
+
+ // Windows1258 is the MIB identifier with IANA name windows-1258.
+ //
+ // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258
+ Windows1258 MIB = 2258
+
+ // TIS620 is the MIB identifier with IANA name TIS-620.
+ //
+ // Thai Industrial Standards Institute (TISI)
+ TIS620 MIB = 2259
+
+ // CP50220 is the MIB identifier with IANA name CP50220.
+ //
+ // See http://www.iana.org/assignments/charset-reg/CP50220
+ CP50220 MIB = 2260
+)
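+
+// Editor's sketch (hypothetical, not part of the IANA registry above): MIB is
+// an integer type, so an encoding's identity can be matched against an IANA
+// assignment numerically; enc below is an assumed variable holding an
+// encoding that implements Interface:
+//
+//	if mib, _ := enc.(Interface).ID(); mib == UTF8 {
+//		// enc is IANA charset 106 (UTF-8).
+//	}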
diff --git a/vendor/golang.org/x/text/encoding/internal/internal.go b/vendor/golang.org/x/text/encoding/internal/internal.go
new file mode 100644
index 000000000..75a5fd165
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/internal.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains code that is shared among encoding implementations.
+package internal
+
+import (
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/internal/identifier"
+ "golang.org/x/text/transform"
+)
+
+// Encoding is an implementation of the Encoding interface that adds the String
+// and ID methods to an existing encoding.
+type Encoding struct {
+ encoding.Encoding
+ Name string
+ MIB identifier.MIB
+}
+
+// _ verifies that Encoding implements identifier.Interface.
+var _ identifier.Interface = (*Encoding)(nil)
+
+func (e *Encoding) String() string {
+ return e.Name
+}
+
+func (e *Encoding) ID() (mib identifier.MIB, other string) {
+ return e.MIB, ""
+}
+
+// SimpleEncoding is an encoding.Encoding that combines two Transformers.
+type SimpleEncoding struct {
+ Decoder transform.Transformer
+ Encoder transform.Transformer
+}
+
+func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
+ return &encoding.Decoder{Transformer: e.Decoder}
+}
+
+func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
+ return &encoding.Encoder{Transformer: e.Encoder}
+}
+
+// FuncEncoding is an encoding.Encoding that combines two functions, each
+// returning a new Transformer.
+type FuncEncoding struct {
+ Decoder func() transform.Transformer
+ Encoder func() transform.Transformer
+}
+
+func (e FuncEncoding) NewDecoder() *encoding.Decoder {
+ return &encoding.Decoder{Transformer: e.Decoder()}
+}
+
+func (e FuncEncoding) NewEncoder() *encoding.Encoder {
+ return &encoding.Encoder{Transformer: e.Encoder()}
+}
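+
+// Editor's sketch (hypothetical constructors): FuncEncoding suits encodings
+// whose Transformers carry per-stream state, since every NewDecoder call
+// builds a fresh Transformer; newStatefulDecoder and newStatefulEncoder are
+// assumed helpers, not part of this package:
+//
+//	enc := FuncEncoding{
+//		Decoder: func() transform.Transformer { return newStatefulDecoder() },
+//		Encoder: func() transform.Transformer { return newStatefulEncoder() },
+//	}
+//	dec := enc.NewDecoder() // each call gets its own Transformer state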
+
+// A RepertoireError indicates a rune is not in the repertoire of a destination
+// encoding. It is associated with an encoding-specific suggested replacement
+// byte.
+type RepertoireError byte
+
+// Error implements the error interface.
+func (r RepertoireError) Error() string {
+ return "encoding: rune not supported by encoding."
+}
+
+// Replacement returns the replacement string associated with this error.
+func (r RepertoireError) Replacement() byte { return byte(r) }
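+
+// Editor's sketch (hypothetical caller): an encoder that cannot represent a
+// rune can return a RepertoireError carrying its preferred substitute byte;
+// inRepertoire is an assumed helper:
+//
+//	if !inRepertoire(r) {
+//		return nDst, nSrc, RepertoireError('?')
+//	}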
+
+// ErrASCIIReplacement is the RepertoireError whose suggested replacement is
+// the ASCII substitution character.
+var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)
diff --git a/vendor/golang.org/x/text/encoding/unicode/override.go b/vendor/golang.org/x/text/encoding/unicode/override.go
new file mode 100644
index 000000000..35d62fcc9
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/unicode/override.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unicode
+
+import (
+ "golang.org/x/text/transform"
+)
+
+// BOMOverride returns a new decoder transformer that is identical to fallback,
+// except that the presence of a Byte Order Mark at the start of the input
+// causes it to switch to the corresponding Unicode decoding. It will only
+// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
+//
+// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
+// just UTF-16 variants, and allowing falling back to any encoding scheme.
+//
+// This technique is recommended by the W3C for use in HTML 5: "For
+// compatibility with deployed content, the byte order mark (also known as BOM)
+// is considered more authoritative than anything else."
+// http://www.w3.org/TR/encoding/#specification-hooks
+//
+// BOMOverride is mostly intended for cases where the first characters of the
+// fallback encoding are known not to form a BOM, which holds for valid HTML
+// and for most encodings.
+func BOMOverride(fallback transform.Transformer) transform.Transformer {
+ // TODO: possibly allow a variadic argument of unicode encodings to allow
+ // specifying details of which fallbacks are supported as well as
+ // specifying the details of the implementations. This would also allow for
+ // support for UTF-32, which should not be supported by default.
+ return &bomOverride{fallback: fallback}
+}
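+
+// Editor's sketch (hypothetical usage, assuming the charmap and transform
+// packages are imported and input is a []byte): wrap a Windows-1252 decoder
+// so that a leading BOM, if present, overrides it:
+//
+//	fallback := charmap.Windows1252.NewDecoder()
+//	utf8Bytes, _, err := transform.Bytes(BOMOverride(fallback), input)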
+
+type bomOverride struct {
+ fallback transform.Transformer
+ current transform.Transformer
+}
+
+func (d *bomOverride) Reset() {
+ d.current = nil
+ d.fallback.Reset()
+}
+
+var (
+ // TODO: we could use decode functions here, instead of allocating a new
+ // decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
+ utf16le = UTF16(LittleEndian, IgnoreBOM)
+ utf16be = UTF16(BigEndian, IgnoreBOM)
+)
+
+const utf8BOM = "\ufeff"
+
+func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ if d.current != nil {
+ return d.current.Transform(dst, src, atEOF)
+ }
+ if len(src) < 3 && !atEOF {
+ return 0, 0, transform.ErrShortSrc
+ }
+ d.current = d.fallback
+ bomSize := 0
+ if len(src) >= 2 {
+ if src[0] == 0xFF && src[1] == 0xFE {
+ d.current = utf16le.NewDecoder()
+ bomSize = 2
+ } else if src[0] == 0xFE && src[1] == 0xFF {
+ d.current = utf16be.NewDecoder()
+ bomSize = 2
+ } else if len(src) >= 3 &&
+ src[0] == utf8BOM[0] &&
+ src[1] == utf8BOM[1] &&
+ src[2] == utf8BOM[2] {
+ d.current = transform.Nop
+ bomSize = 3
+ }
+ }
+ if bomSize < len(src) {
+ nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
+ }
+ return nDst, nSrc + bomSize, err
+}
diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go
new file mode 100644
index 000000000..579cadfb1
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go
@@ -0,0 +1,434 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unicode provides Unicode encodings such as UTF-16.
+package unicode // import "golang.org/x/text/encoding/unicode"
+
+import (
+ "errors"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/internal"
+ "golang.org/x/text/encoding/internal/identifier"
+ "golang.org/x/text/internal/utf8internal"
+ "golang.org/x/text/runes"
+ "golang.org/x/text/transform"
+)
+
+// TODO: I think the Transformers really should return errors on unmatched
+// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
+// which leaves it open, but is suggested by WhatWG. It will allow for all error
+// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
+// the introduction of some kind of error type for conveying the erroneous code
+// point.
+
+// UTF8 is the UTF-8 encoding.
+var UTF8 encoding.Encoding = utf8enc
+
+var utf8enc = &internal.Encoding{
+ &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
+ "UTF-8",
+ identifier.UTF8,
+}
+
+type utf8Decoder struct{ transform.NopResetter }
+
+func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ var pSrc int // point from which to start copy in src
+ var accept utf8internal.AcceptRange
+
+ // The decoder can only make the output larger than the input, not smaller.
+ n := len(src)
+ if len(dst) < n {
+ err = transform.ErrShortDst
+ n = len(dst)
+ atEOF = false
+ }
+ for nSrc < n {
+ c := src[nSrc]
+ if c < utf8.RuneSelf {
+ nSrc++
+ continue
+ }
+ first := utf8internal.First[c]
+ size := int(first & utf8internal.SizeMask)
+ if first == utf8internal.FirstInvalid {
+ goto handleInvalid // invalid starter byte
+ }
+ accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
+ if nSrc+size > n {
+ if !atEOF {
+ // We may stop earlier than necessary here if the short sequence
+ // has invalid bytes. Not checking for this simplifies the code
+ // and may avoid duplicate computations in certain conditions.
+ if err == nil {
+ err = transform.ErrShortSrc
+ }
+ break
+ }
+ // Determine the maximal subpart of an ill-formed subsequence.
+ switch {
+ case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
+ size = 1
+ case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
+ size = 2
+ default:
+ size = 3 // As we are short, the maximum is 3.
+ }
+ goto handleInvalid
+ }
+ if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
+ size = 1
+ goto handleInvalid // invalid continuation byte
+ } else if size == 2 {
+ } else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
+ size = 2
+ goto handleInvalid // invalid continuation byte
+ } else if size == 3 {
+ } else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
+ size = 3
+ goto handleInvalid // invalid continuation byte
+ }
+ nSrc += size
+ continue
+
+ handleInvalid:
+ // Copy the scanned input so far.
+ nDst += copy(dst[nDst:], src[pSrc:nSrc])
+
+ // Append RuneError to the destination.
+ const runeError = "\ufffd"
+ if nDst+len(runeError) > len(dst) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ nDst += copy(dst[nDst:], runeError)
+
+ // Skip the maximal subpart of an ill-formed subsequence, following
+ // the W3C approach rather than Go's. This Transform is probably the
+ // only place in the text repo where that is warranted.
+ nSrc += size
+ pSrc = nSrc
+
+ // Recompute the maximum source length.
+ if sz := len(dst) - nDst; sz < len(src)-nSrc {
+ err = transform.ErrShortDst
+ n = nSrc + sz
+ atEOF = false
+ }
+ }
+ return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
+}
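+
+// Editor's note (worked example of the rule above): under the maximal-subpart
+// rule, "\xE0\x80" decodes to two U+FFFD, because 0x80 is outside the accept
+// range [0xA0, 0xBF] for a 0xE0 starter, while the truncated "\xE0\xA0" at
+// EOF decodes to a single U+FFFD.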
+
+// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
+// order mark (BOM) policy.
+//
+// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
+// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
+// the endianness used for decoding, and will instead be output as their
+// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
+// is UseBOM or ExpectBOM, a starting BOM is not written to the UTF-8 output.
+// Instead, it overrides the default endianness e for the remainder of the
+// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
+// affect the endianness used, and will instead be output as their standard
+// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
+// with the default Endianness. For ExpectBOM, in that case, the transformation
+// will return early with an ErrMissingBOM error.
+//
+// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
+// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
+// be inserted. The UTF-8 input does not need to contain a BOM.
+//
+// There is no concept of a 'native' endianness. If the UTF-16 data is produced
+// and consumed in a greater context that implies a certain endianness, use
+// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
+//
+// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
+// corresponds to "Where the precise type of the data stream is known... the
+// BOM should not be used" and ExpectBOM corresponds to "A particular
+// protocol... may require use of the BOM".
+func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
+ return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
+}
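+
+// Editor's sketch (hypothetical usage): decoding BOM-less UTF-16LE input to
+// UTF-8 via the Decoder's String helper:
+//
+//	dec := UTF16(LittleEndian, IgnoreBOM).NewDecoder()
+//	s, err := dec.String("\x41\x00\x42\x00") // s == "AB"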
+
+// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
+// some configurations map to the same MIB identifier. RFC 2781 has requirements
+// and recommendations. Some of the "configurations" are merely recommendations,
+// so multiple configurations could match.
+var mibValue = map[Endianness][numBOMValues]identifier.MIB{
+ BigEndian: [numBOMValues]identifier.MIB{
+ IgnoreBOM: identifier.UTF16BE,
+ UseBOM: identifier.UTF16, // BigEndian default is preferred by RFC 2781.
+ // TODO: acceptBOM | strictBOM would map to UTF16BE as well.
+ },
+ LittleEndian: [numBOMValues]identifier.MIB{
+ IgnoreBOM: identifier.UTF16LE,
+ UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
+ // TODO: acceptBOM | strictBOM would map to UTF16LE as well.
+ },
+ // ExpectBOM is not widely used and has no valid MIB identifier.
+}
+
+// All lists a configuration for UTF-8 and each IANA-defined UTF-16 variant.
+var All = []encoding.Encoding{
+ UTF8,
+ UTF16(BigEndian, UseBOM),
+ UTF16(BigEndian, IgnoreBOM),
+ UTF16(LittleEndian, IgnoreBOM),
+}
+
+// BOMPolicy is a UTF-16 encoding's byte order mark policy.
+type BOMPolicy uint8
+
+const (
+ writeBOM BOMPolicy = 0x01
+ acceptBOM BOMPolicy = 0x02
+ requireBOM BOMPolicy = 0x04
+ bomMask BOMPolicy = 0x07
+
+ // HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
+ // map of an array of length 8 of a type that is also used as a key or value
+ // in another map). See golang.org/issue/11354.
+ // TODO: consider changing this value back to 8 if the use of 1.4.* has
+ // been minimized.
+ numBOMValues = 8 + 1
+
+ // IgnoreBOM means to ignore any byte order marks.
+ IgnoreBOM BOMPolicy = 0
+ // Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
+
+ // UseBOM means that the UTF-16 form may start with a byte order mark, which
+ // will be used to override the default encoding.
+ UseBOM BOMPolicy = writeBOM | acceptBOM
+ // Common and RFC 2781-compliant interpretation for UTF-16.
+
+ // ExpectBOM means that the UTF-16 form must start with a byte order mark,
+ // which will be used to override the default encoding.
+ ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
+ // Used in Java as Unicode (not to be confused with Java's UTF-16) and
+ // ICU's UTF-16,version=1. Not compliant with RFC 2781.
+
+ // TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
+ // - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
+ // (UnicodeBig and UnicodeLittle in Java)
+ // - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
+ // acceptBOM | strictBOM (e.g. assigned to CheckBOM).
+ // This addition would be consistent with supporting ExpectBOM.
+)
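+
+// Editor's note (sketch): the exported policies are compositions of the flag
+// bits above: UseBOM = 0x01|0x02 = 0x03 and ExpectBOM = 0x01|0x02|0x04 = 0x07,
+// so policy&bomMask recovers exactly these flags for the mibValue lookup.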
+
+// Endianness is a UTF-16 encoding's default endianness.
+type Endianness bool
+
+const (
+ // BigEndian is UTF-16BE.
+ BigEndian Endianness = false
+ // LittleEndian is UTF-16LE.
+ LittleEndian Endianness = true
+)
+
+// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
+// starting byte order mark.
+var ErrMissingBOM = errors.New("encoding: missing byte order mark")
+
+type utf16Encoding struct {
+ config
+ mib identifier.MIB
+}
+
+type config struct {
+ endianness Endianness
+ bomPolicy BOMPolicy
+}
+
+func (u utf16Encoding) NewDecoder() *encoding.Decoder {
+ return &encoding.Decoder{Transformer: &utf16Decoder{
+ initial: u.config,
+ current: u.config,
+ }}
+}
+
+func (u utf16Encoding) NewEncoder() *encoding.Encoder {
+ return &encoding.Encoder{Transformer: &utf16Encoder{
+ endianness: u.endianness,
+ initialBOMPolicy: u.bomPolicy,
+ currentBOMPolicy: u.bomPolicy,
+ }}
+}
+
+func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
+ return u.mib, ""
+}
+
+func (u utf16Encoding) String() string {
+ e, b := "B", ""
+ if u.endianness == LittleEndian {
+ e = "L"
+ }
+ switch u.bomPolicy {
+ case ExpectBOM:
+ b = "Expect"
+ case UseBOM:
+ b = "Use"
+ case IgnoreBOM:
+ b = "Ignore"
+ }
+ return "UTF-16" + e + "E (" + b + " BOM)"
+}
+
+type utf16Decoder struct {
+ initial config
+ current config
+}
+
+func (u *utf16Decoder) Reset() {
+ u.current = u.initial
+}
+
+func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ if len(src) == 0 {
+ if atEOF && u.current.bomPolicy&requireBOM != 0 {
+ return 0, 0, ErrMissingBOM
+ }
+ return 0, 0, nil
+ }
+ if u.current.bomPolicy&acceptBOM != 0 {
+ if len(src) < 2 {
+ return 0, 0, transform.ErrShortSrc
+ }
+ switch {
+ case src[0] == 0xfe && src[1] == 0xff:
+ u.current.endianness = BigEndian
+ nSrc = 2
+ case src[0] == 0xff && src[1] == 0xfe:
+ u.current.endianness = LittleEndian
+ nSrc = 2
+ default:
+ if u.current.bomPolicy&requireBOM != 0 {
+ return 0, 0, ErrMissingBOM
+ }
+ }
+ u.current.bomPolicy = IgnoreBOM
+ }
+
+ var r rune
+ var dSize, sSize int
+ for nSrc < len(src) {
+ if nSrc+1 < len(src) {
+ x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
+ if u.current.endianness == LittleEndian {
+ x = x>>8 | x<<8
+ }
+ r, sSize = rune(x), 2
+ if utf16.IsSurrogate(r) {
+ if nSrc+3 < len(src) {
+ x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
+ if u.current.endianness == LittleEndian {
+ x = x>>8 | x<<8
+ }
+ // Decode the pair only if the second unit is a trailing
+ // surrogate; otherwise leave it for the next iteration.
+ if isHighSurrogate(rune(x)) {
+ r, sSize = utf16.DecodeRune(r, rune(x)), 4
+ }
+ } else if !atEOF {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ if dSize = utf8.RuneLen(r); dSize < 0 {
+ r, dSize = utf8.RuneError, 3
+ }
+ } else if atEOF {
+ // Single trailing byte.
+ r, dSize, sSize = utf8.RuneError, 3, 1
+ } else {
+ err = transform.ErrShortSrc
+ break
+ }
+ if nDst+dSize > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += utf8.EncodeRune(dst[nDst:], r)
+ nSrc += sSize
+ }
+ return nDst, nSrc, err
+}
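+
+// Editor's note (worked example): with UseBOM, input FE FF 00 41 consumes the
+// BOM, switches the decoder to BigEndian, and decodes 0x0041 to "A"; with
+// IgnoreBOM, the same FE FF is decoded as the rune U+FEFF instead.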
+
+// isHighSurrogate reports whether r is in the trailing (second) surrogate
+// range 0xDC00..0xDFFF, i.e. whether it can complete a surrogate pair.
+func isHighSurrogate(r rune) bool {
+ return 0xDC00 <= r && r <= 0xDFFF
+}
+
+type utf16Encoder struct {
+ endianness Endianness
+ initialBOMPolicy BOMPolicy
+ currentBOMPolicy BOMPolicy
+}
+
+func (u *utf16Encoder) Reset() {
+ u.currentBOMPolicy = u.initialBOMPolicy
+}
+
+func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ if u.currentBOMPolicy&writeBOM != 0 {
+ if len(dst) < 2 {
+ return 0, 0, transform.ErrShortDst
+ }
+ dst[0], dst[1] = 0xfe, 0xff
+ u.currentBOMPolicy = IgnoreBOM
+ nDst = 2
+ }
+
+ r, size := rune(0), 0
+ for nSrc < len(src) {
+ r = rune(src[nSrc])
+
+ // Decode a 1-byte rune.
+ if r < utf8.RuneSelf {
+ size = 1
+
+ } else {
+ // Decode a multi-byte rune.
+ r, size = utf8.DecodeRune(src[nSrc:])
+ if size == 1 {
+ // All valid runes of size 1 (those below utf8.RuneSelf) were
+ // handled above. We have invalid UTF-8 or we haven't seen the
+ // full character yet.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ }
+
+ if r <= 0xffff {
+ if nDst+2 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = uint8(r >> 8)
+ dst[nDst+1] = uint8(r)
+ nDst += 2
+ } else {
+ if nDst+4 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ r1, r2 := utf16.EncodeRune(r)
+ dst[nDst+0] = uint8(r1 >> 8)
+ dst[nDst+1] = uint8(r1)
+ dst[nDst+2] = uint8(r2 >> 8)
+ dst[nDst+3] = uint8(r2)
+ nDst += 4
+ }
+ nSrc += size
+ }
+
+ if u.endianness == LittleEndian {
+ for i := 0; i < nDst; i += 2 {
+ dst[i], dst[i+1] = dst[i+1], dst[i]
+ }
+ }
+ return nDst, nSrc, err
+}
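+
+// Editor's note (sketch): the encoder always emits big-endian units and, for
+// LittleEndian, swaps each byte pair afterwards, so "A" (U+0041) becomes
+// 00 41 in BE output and 41 00 in LE output; a written BOM FE FF is swapped
+// to FF FE the same way.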
diff --git a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
new file mode 100644
index 000000000..575cea870
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf8internal contains low-level utf8-related constants, tables, etc.
+// that are used internally by the text package.
+package utf8internal
+
+// The default lowest and highest continuation byte.
+const (
+ LoCB = 0x80 // 1000 0000
+ HiCB = 0xBF // 1011 1111
+)
+
+// Constants related to getting information about the first byte of a UTF-8
+// sequence.
+const (
+ // ASCII identifies a UTF-8 byte as ASCII.
+ ASCII = as
+
+ // FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
+ // sequence.
+ FirstInvalid = xx
+
+ // SizeMask is a mask for the size bits. Use x&SizeMask to get the size.
+ SizeMask = 7
+
+ // AcceptShift is the right-shift count for the first byte info byte to get
+ // the index into the AcceptRanges table. See AcceptRanges.
+ AcceptShift = 4
+
+ // The names of these constants are chosen to give nice alignment in the
+ // table below. The first nibble is an index into acceptRanges or F for
+ // special one-byte cases. The second nibble is the Rune length or the
+ // Status for the special one-byte case.
+ xx = 0xF1 // invalid: size 1
+ as = 0xF0 // ASCII: size 1
+ s1 = 0x02 // accept 0, size 2
+ s2 = 0x13 // accept 1, size 3
+ s3 = 0x03 // accept 0, size 3
+ s4 = 0x23 // accept 2, size 3
+ s5 = 0x34 // accept 3, size 4
+ s6 = 0x04 // accept 0, size 4
+ s7 = 0x44 // accept 4, size 4
+)
+
+// First is information about the first byte in a UTF-8 sequence.
+var First = [256]uint8{
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+ xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+ s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+ s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+ s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+// AcceptRange gives the range of valid values for the second byte in a UTF-8
+// sequence for any value for First that is not ASCII or FirstInvalid.
+type AcceptRange struct {
+ Lo uint8 // lowest value for second byte.
+ Hi uint8 // highest value for second byte.
+}
+
+// AcceptRanges is an array of AcceptRange values. For a given byte sequence b
+//
+// AcceptRanges[First[b[0]]>>AcceptShift]
+//
+// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
+// at b[0].
+var AcceptRanges = [...]AcceptRange{
+ 0: {LoCB, HiCB},
+ 1: {0xA0, HiCB},
+ 2: {LoCB, 0x9F},
+ 3: {0x90, HiCB},
+ 4: {LoCB, 0x8F},
+}
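Since utf8internal is an internal package and cannot be imported from outside x/text, here is a sketch that reproduces the lookup scheme with local copies of the relevant constants; the values mirror the table above:

    package main

    import "fmt"

    // Local stand-ins for the internal constants; values copied from the
    // table above for illustration only.
    const (
        sizeMask    = 7
        acceptShift = 4
        s2          = 0x13 // accept range 1, size 3 (the info byte for 0xE0)
    )

    type acceptRange struct{ lo, hi uint8 }

    var acceptRanges = [...]acceptRange{
        0: {0x80, 0xBF},
        1: {0xA0, 0xBF},
        2: {0x80, 0x9F},
        3: {0x90, 0xBF},
        4: {0x80, 0x8F},
    }

    func main() {
        // First[0xE0] == s2: a 3-byte sequence whose second byte must fall in
        // [0xA0, 0xBF] (range index 1), ruling out overlong forms like E0 80 80.
        info := uint8(s2)
        size := info & sizeMask
        ar := acceptRanges[info>>acceptShift]
        fmt.Printf("size=%d, second byte in [%#x, %#x]\n", size, ar.lo, ar.hi)
    }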
diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go
new file mode 100644
index 000000000..df7aa02db
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/cond.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runes
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
+// This is done for various reasons:
+// - To retain the semantics of the Nop transformer: if input is passed to a Nop
+// one would expect it to be unchanged.
+// - It would be very expensive to pass a converted RuneError to a transformer:
+// a transformer might need more source bytes after RuneError, meaning that
+// the only way to pass it safely is to create a new buffer and manage the
+// intermingling of RuneErrors and normal input.
+// - Many transformers leave ill-formed UTF-8 as is, so this is not
+// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
+// logical consequence of the operation (as for Map) or if it otherwise would
+// pose security concerns (as for Remove).
+// - An alternative would be to return an error on ill-formed UTF-8, but this
+// would be inconsistent with other operations.
+
+// If returns a transformer that applies tIn to consecutive runes for which
+// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
+// is called on tIn and tNotIn at the start of each run. A nil tIn or tNotIn is
+// treated as a Nop transformer. Invalid UTF-8 is translated
+// to RuneError to determine which transformer to apply, but is passed as is to
+// the respective transformer.
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
+ if tIn == nil && tNotIn == nil {
+ return Transformer{transform.Nop}
+ }
+ if tIn == nil {
+ tIn = transform.Nop
+ }
+ if tNotIn == nil {
+ tNotIn = transform.Nop
+ }
+ sIn, ok := tIn.(transform.SpanningTransformer)
+ if !ok {
+ sIn = dummySpan{tIn}
+ }
+ sNotIn, ok := tNotIn.(transform.SpanningTransformer)
+ if !ok {
+ sNotIn = dummySpan{tNotIn}
+ }
+
+ a := &cond{
+ tIn: sIn,
+ tNotIn: sNotIn,
+ f: s.Contains,
+ }
+ a.Reset()
+ return Transformer{a}
+}
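A usage sketch of If, pairing a rune set with an inner Map transformer (both from this package) and leaving tNotIn as the implicit Nop:

    package main

    import (
        "fmt"
        "unicode"

        "golang.org/x/text/runes"
        "golang.org/x/text/transform"
    )

    func main() {
        // Upper-case runs of Greek runes only; non-Greek runs hit the nil
        // (Nop) branch and pass through unchanged.
        t := runes.If(runes.In(unicode.Greek), runes.Map(unicode.ToUpper), nil)

        s, _, err := transform.String(t, "abc αβγ def")
        if err != nil {
            panic(err)
        }
        fmt.Println(s) // abc ΑΒΓ def
    }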
+
+type dummySpan struct{ transform.Transformer }
+
+func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
+ return 0, transform.ErrEndOfSpan
+}
+
+type cond struct {
+ tIn, tNotIn transform.SpanningTransformer
+ f func(rune) bool
+ check func(rune) bool // current check to perform
+ t transform.SpanningTransformer // current transformer to use
+}
+
+// Reset implements transform.Transformer.
+func (t *cond) Reset() {
+ t.check = t.is
+ t.t = t.tIn
+ t.t.Reset() // notIn will be reset on first usage.
+}
+
+func (t *cond) is(r rune) bool {
+ if t.f(r) {
+ return true
+ }
+ t.check = t.isNot
+ t.t = t.tNotIn
+ t.tNotIn.Reset()
+ return false
+}
+
+func (t *cond) isNot(r rune) bool {
+ if !t.f(r) {
+ return true
+ }
+ t.check = t.is
+ t.t = t.tIn
+ t.tIn.Reset()
+ return false
+}
+
+// This implementation of Span doesn't help all that much, but it needs to be
+// there to satisfy this package's Transformer interface.
+// TODO: there is certainly room for improvement, though. For example, if
+// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
+// to special-case that loop.
+func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
+ p := 0
+ for n < len(src) && err == nil {
+ // Don't process too much at a time as the Spanner that will be
+ // called on this block may terminate early.
+ const maxChunk = 4096
+ max := len(src)
+ if v := n + maxChunk; v < max {
+ max = v
+ }
+ atEnd := false
+ size := 0
+ current := t.t
+ for ; p < max; p += size {
+ r := rune(src[p])
+ if r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+ if !atEOF && !utf8.FullRune(src[p:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ if !t.check(r) {
+ // The next rune will be the start of a new run.
+ atEnd = true
+ break
+ }
+ }
+ n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
+ n += n2
+ if err2 != nil {
+ return n, err2
+ }
+ // At this point either err != nil or t.check will pass for the rune at p.
+ p = n + size
+ }
+ return n, err
+}
+
+func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ p := 0
+ for nSrc < len(src) && err == nil {
+ // Don't process too much at a time, as the work might be wasted if the
+ // destination buffer isn't large enough to hold the result or a
+ // transform returns an error early.
+ const maxChunk = 4096
+ max := len(src)
+ if n := nSrc + maxChunk; n < len(src) {
+ max = n
+ }
+ atEnd := false
+ size := 0
+ current := t.t
+ for ; p < max; p += size {
+ r := rune(src[p])
+ if r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+ if !atEOF && !utf8.FullRune(src[p:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ if !t.check(r) {
+ // The next rune will be the start of a new run.
+ atEnd = true
+ break
+ }
+ }
+ nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
+ nDst += nDst2
+ nSrc += nSrc2
+ if err2 != nil {
+ return nDst, nSrc, err2
+ }
+ // At this point either err != nil or t.check will pass for the rune at p.
+ p = nSrc + size
+ }
+ return nDst, nSrc, err
+}
diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go
new file mode 100644
index 000000000..71933696f
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/runes.go
@@ -0,0 +1,355 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package runes provides transforms for UTF-8 encoded text.
+package runes // import "golang.org/x/text/runes"
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// A Set is a collection of runes.
+type Set interface {
+ // Contains returns true if r is contained in the set.
+ Contains(r rune) bool
+}
+
+type setFunc func(rune) bool
+
+func (s setFunc) Contains(r rune) bool {
+ return s(r)
+}
+
+// Note: using funcs here instead of wrapping types results in cleaner
+// documentation and a smaller API.
+
+// In creates a Set with a Contains method that returns true for all runes in
+// the given RangeTable.
+func In(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
+}
+
+// NotIn creates a Set with a Contains method that returns true for all runes not
+// in the given RangeTable.
+func NotIn(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
+}
+
+// Predicate creates a Set with a Contains method that returns f(r).
+func Predicate(f func(rune) bool) Set {
+ return setFunc(f)
+}
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+ t transform.SpanningTransformer
+}
+
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ return t.t.Transform(dst, src, atEOF)
+}
+
+func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
+ return t.t.Span(b, atEOF)
+}
+
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Bytes returns a new byte slice with the result of converting b using t. It
+// calls Reset on t. It returns nil if any error was found. This can only happen
+// if an error-producing Transformer is passed to If.
+func (t Transformer) Bytes(b []byte) []byte {
+ b, _, err := transform.Bytes(t, b)
+ if err != nil {
+ return nil
+ }
+ return b
+}
+
+// String returns a string with the result of converting s using t. It calls
+// Reset on t. It returns the empty string if any error was found. This can only
+// happen if an error-producing Transformer is passed to If.
+func (t Transformer) String(s string) string {
+ s, _, err := transform.String(t, s)
+ if err != nil {
+ return ""
+ }
+ return s
+}
+
+// TODO:
+// - Copy: copying strings and bytes in whole-rune units.
+// - Validation (maybe)
+// - Well-formed-ness (maybe)
+
+const runeErrorString = string(utf8.RuneError)
+
+// Remove returns a Transformer that removes runes r for which s.Contains(r).
+// Illegal input bytes are replaced by RuneError before being passed to s.Contains.
+func Remove(s Set) Transformer {
+ if f, ok := s.(setFunc); ok {
+ // This little trick cuts the running time of BenchmarkRemove for sets
+ // created by Predicate roughly in half.
+ // TODO: special-case RangeTables as well.
+ return Transformer{remove(f)}
+ }
+ return Transformer{remove(s.Contains)}
+}
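A common usage sketch, assuming x/text/unicode/norm is also available: chain NFD decomposition, removal of nonspacing marks, and NFC recomposition to strip accents:

    package main

    import (
        "fmt"
        "unicode"

        "golang.org/x/text/runes"
        "golang.org/x/text/transform"
        "golang.org/x/text/unicode/norm"
    )

    func main() {
        // Decompose, drop nonspacing marks (Mn), then recompose:
        // "résumé" -> "resume".
        t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)

        s, _, err := transform.String(t, "résumé")
        if err != nil {
            panic(err)
        }
        fmt.Println(s) // resume
    }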
+
+// TODO: remove transform.RemoveFunc.
+
+type remove func(r rune) bool
+
+func (remove) Reset() {}
+
+// Span implements transform.Spanner.
+func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return
+}
+
+// Transform implements transform.Transformer.
+func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ // We replace illegal bytes with RuneError. Not doing so might
+ // otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+ // The resulting byte sequence may subsequently contain runes
+ // for which t(r) is true that were passed unnoticed.
+ if !t(utf8.RuneError) {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ }
+ nSrc++
+ continue
+ }
+ if t(r) {
+ nSrc += size
+ continue
+ }
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ }
+ return
+}
+
+// Map returns a Transformer that maps the runes in the input using the given
+// mapping. Illegal bytes in the input are converted to utf8.RuneError before
+// being passed to the mapping func.
+func Map(mapping func(rune) rune) Transformer {
+ return Transformer{mapper(mapping)}
+}
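A small usage sketch of Map, substituting a placeholder rune for each ASCII vowel and returning every other rune unchanged:

    package main

    import (
        "fmt"

        "golang.org/x/text/runes"
    )

    func main() {
        t := runes.Map(func(r rune) rune {
            switch r {
            case 'a', 'e', 'i', 'o', 'u':
                return '*'
            }
            return r
        })
        fmt.Println(t.String("hello world")) // h*ll* w*rld
    }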
+
+type mapper func(rune) rune
+
+func (mapper) Reset() {}
+
+// Span implements transform.Spanner.
+func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); n += size {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) != r {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ }
+ return n, err
+}
+
+// Transform implements transform.Transformer.
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ var replacement rune
+ var b [utf8.UTFMax]byte
+
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ if replacement = t(r); replacement < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = byte(replacement)
+ nDst++
+ nSrc++
+ continue
+ }
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ continue
+ }
+ } else if replacement = t(r); replacement == r {
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ continue
+ }
+
+ n := utf8.EncodeRune(b[:], replacement)
+
+ if nDst+n > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < n; i++ {
+ dst[nDst] = b[i]
+ nDst++
+ }
+ nSrc += size
+ }
+ return
+}
+
+// ReplaceIllFormed returns a transformer that replaces all input bytes that are
+// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
+func ReplaceIllFormed() Transformer {
+ return Transformer{&replaceIllFormed{}}
+}
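A usage sketch of ReplaceIllFormed, sanitizing a byte string that cannot occur in well-formed UTF-8:

    package main

    import (
        "fmt"

        "golang.org/x/text/runes"
    )

    func main() {
        // 0xff can never appear in well-formed UTF-8; it becomes U+FFFD.
        in := string([]byte{'o', 'k', 0xff, '!'})
        fmt.Printf("%q\n", runes.ReplaceIllFormed().String(in)) // "ok\ufffd!"
    }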
+
+type replaceIllFormed struct{ transform.NopResetter }
+
+func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ // ASCII fast path.
+ if src[n] < utf8.RuneSelf {
+ n++
+ continue
+ }
+
+ r, size := utf8.DecodeRune(src[n:])
+
+ // Look for a valid non-ASCII rune.
+ if r != utf8.RuneError || size != 1 {
+ n += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ err = transform.ErrEndOfSpan
+ break
+ }
+ return n, err
+}
+
+func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ // ASCII fast path.
+ if r := src[nSrc]; r < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = r
+ nDst++
+ nSrc++
+ continue
+ }
+
+ // Look for a valid non-ASCII rune.
+ if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += size
+ nSrc += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ }
+ return nDst, nSrc, err
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index cdc961e3e..1df811219 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1320,6 +1320,24 @@
"revision": "d3c2f16719dedd34911cd626a98bd5879e1caaff",
"revisionTime": "2018-04-03T19:54:48Z"
},
+ {
+ "checksumSHA1": "B8KN0npDVBBnSDoL8htTSBpFgZ0=",
+ "path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin",
+ "revision": "321ea9aa40719a982e9ad39fecd911a212d0d7c0",
+ "revisionTime": "2018-05-24T23:02:05Z"
+ },
+ {
+ "checksumSHA1": "qHGmA9y3hKMBSLRWLifD37EaHP4=",
+ "path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin/client",
+ "revision": "321ea9aa40719a982e9ad39fecd911a212d0d7c0",
+ "revisionTime": "2018-05-24T23:02:05Z"
+ },
+ {
+ "checksumSHA1": "/wFdQSWF1zexkefiI7j+LrREMHk=",
+ "path": "github.com/hashicorp/vault-plugin-secrets-ad/plugin/util",
+ "revision": "321ea9aa40719a982e9ad39fecd911a212d0d7c0",
+ "revisionTime": "2018-05-24T23:02:05Z"
+ },
{
"checksumSHA1": "0BXf2h4FJSUTdVK3m75a1KXnYVA=",
"path": "github.com/hashicorp/vault-plugin-secrets-gcp/plugin",
From 35cb9bc517c52b4ba82434a3c7f94d362fbc0f0c Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Fri, 25 May 2018 14:38:06 -0400
Subject: [PATCH 39/39] Redo API client locking (#4551)
* Redo API client locking
This copies shared configuration into local values in critical paths,
allowing a single API client to pipeline requests much more quickly and
safely.
Additionally, to take that paradigm all the way, it changes how timeouts
are set. They now use a context value set on the request instead of
configuring the timeout on the http client per request, which was also
potentially quite racy.
Trivially tested with
VAULT_CLIENT_TIMEOUT=2 vault write pki/root/generate/internal key_type=rsa key_bits=8192
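The core pattern, sketched below with hypothetical names rather than the client's actual fields: snapshot shared state under a read lock, release the lock before any network I/O, then bound the request with a per-request context deadline instead of mutating the shared http.Client:

    package apiclient

    import (
        "context"
        "net/http"
        "sync"
        "time"
    )

    // client is a stand-in for the API client; field names are illustrative.
    type client struct {
        mu         sync.RWMutex
        httpClient *http.Client
        timeout    time.Duration
    }

    func (c *client) do(req *http.Request) (*http.Response, error) {
        // Snapshot shared state under the read lock, then release it before
        // any network I/O so concurrent callers never serialize on a request.
        c.mu.RLock()
        httpClient := c.httpClient
        timeout := c.timeout
        c.mu.RUnlock()

        if timeout != 0 {
            // The deadline travels with this request only; the shared
            // http.Client is never mutated, so there is no race on its
            // Timeout field.
            ctx, cancel := context.WithTimeout(context.Background(), timeout)
            defer cancel()
            req = req.WithContext(ctx)
        }
        return httpClient.Do(req)
    }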
---
api/client.go | 83 +++++++++++++++++++++++++++++-----------------
api/client_test.go | 15 +--------
2 files changed, 53 insertions(+), 45 deletions(-)
diff --git a/api/client.go b/api/client.go
index 8f5a29868..ce10fff14 100644
--- a/api/client.go
+++ b/api/client.go
@@ -388,11 +388,12 @@ func (c *Client) SetAddress(addr string) error {
c.modifyLock.Lock()
defer c.modifyLock.Unlock()
- var err error
- if c.addr, err = url.Parse(addr); err != nil {
+ parsedAddr, err := url.Parse(addr)
+ if err != nil {
return errwrap.Wrapf("failed to set address: {{err}}", err)
}
+ c.addr = parsedAddr
return nil
}
@@ -411,7 +412,8 @@ func (c *Client) SetLimiter(rateLimit float64, burst int) {
c.modifyLock.RLock()
c.config.modifyLock.Lock()
defer c.config.modifyLock.Unlock()
- defer c.modifyLock.RUnlock()
+ c.modifyLock.RUnlock()
+
c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst)
}
@@ -544,14 +546,20 @@ func (c *Client) SetPolicyOverride(override bool) {
// doesn't need to be called externally.
func (c *Client) NewRequest(method, requestPath string) *Request {
c.modifyLock.RLock()
- defer c.modifyLock.RUnlock()
+ addr := c.addr
+ token := c.token
+ mfaCreds := c.mfaCreds
+ wrappingLookupFunc := c.wrappingLookupFunc
+ headers := c.headers
+ policyOverride := c.policyOverride
+ c.modifyLock.RUnlock()
// if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV
// record and take the highest match; this is not designed for high-availability, just discovery
- var host string = c.addr.Host
- if c.addr.Port() == "" {
+ var host string = addr.Host
+ if addr.Port() == "" {
// Internet Draft specifies that the SRV record is ignored if a port is given
- _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname())
+ _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname())
if err == nil && len(addrs) > 0 {
host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port)
}
@@ -560,12 +568,12 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
req := &Request{
Method: method,
URL: &url.URL{
- User: c.addr.User,
- Scheme: c.addr.Scheme,
+ User: addr.User,
+ Scheme: addr.Scheme,
Host: host,
- Path: path.Join(c.addr.Path, requestPath),
+ Path: path.Join(addr.Path, requestPath),
},
- ClientToken: c.token,
+ ClientToken: token,
Params: make(map[string][]string),
}
@@ -579,21 +587,19 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
lookupPath = requestPath
}
- req.MFAHeaderVals = c.mfaCreds
+ req.MFAHeaderVals = mfaCreds
- if c.wrappingLookupFunc != nil {
- req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
+ if wrappingLookupFunc != nil {
+ req.WrapTTL = wrappingLookupFunc(method, lookupPath)
} else {
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
}
- if c.config.Timeout != 0 {
- c.config.HttpClient.Timeout = c.config.Timeout
- }
- if c.headers != nil {
- req.Headers = c.headers
+
+ if headers != nil {
+ req.Headers = headers
}
- req.PolicyOverride = c.policyOverride
+ req.PolicyOverride = policyOverride
return req
}
@@ -602,18 +608,23 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
// a Vault server not configured with this client. This is an advanced operation
// that generally won't need to be called externally.
func (c *Client) RawRequest(r *Request) (*Response, error) {
-
c.modifyLock.RLock()
- c.config.modifyLock.RLock()
- defer c.config.modifyLock.RUnlock()
-
- if c.config.Limiter != nil {
- c.config.Limiter.Wait(context.Background())
- }
-
token := c.token
+
+ c.config.modifyLock.RLock()
+ limiter := c.config.Limiter
+ maxRetries := c.config.MaxRetries
+ backoff := c.config.Backoff
+ httpClient := c.config.HttpClient
+ timeout := c.config.Timeout
+ c.config.modifyLock.RUnlock()
+
c.modifyLock.RUnlock()
+ if limiter != nil {
+ limiter.Wait(context.Background())
+ }
+
// Sanity check the token before potentially erroring from the API
idx := strings.IndexFunc(token, func(c rune) bool {
return !unicode.IsPrint(c)
@@ -632,16 +643,23 @@ START:
return nil, fmt.Errorf("nil request created")
}
- backoff := c.config.Backoff
+ // Set the timeout, if any
+ var cancelFunc context.CancelFunc
+ if timeout != 0 {
+ var ctx context.Context
+ ctx, cancelFunc = context.WithTimeout(context.Background(), timeout)
+ req.Request = req.Request.WithContext(ctx)
+ }
+
if backoff == nil {
backoff = retryablehttp.LinearJitterBackoff
}
client := &retryablehttp.Client{
- HTTPClient: c.config.HttpClient,
+ HTTPClient: httpClient,
RetryWaitMin: 1000 * time.Millisecond,
RetryWaitMax: 1500 * time.Millisecond,
- RetryMax: c.config.MaxRetries,
+ RetryMax: maxRetries,
CheckRetry: retryablehttp.DefaultRetryPolicy,
Backoff: backoff,
ErrorHandler: retryablehttp.PassthroughErrorHandler,
@@ -649,6 +667,9 @@ START:
var result *Response
resp, err := client.Do(req)
+ if cancelFunc != nil {
+ cancelFunc()
+ }
if resp != nil {
result = &Response{Response: resp}
}
diff --git a/api/client_test.go b/api/client_test.go
index 970354bab..5678478ea 100644
--- a/api/client_test.go
+++ b/api/client_test.go
@@ -7,7 +7,6 @@ import (
"os"
"strings"
"testing"
- "time"
)
func init() {
@@ -244,22 +243,10 @@ func TestClientTimeoutSetting(t *testing.T) {
defer os.Setenv(EnvVaultClientTimeout, oldClientTimeout)
config := DefaultConfig()
config.ReadEnvironment()
- client, err := NewClient(config)
+ _, err := NewClient(config)
if err != nil {
t.Fatal(err)
}
- _ = client.NewRequest("PUT", "/")
- if client.config.HttpClient.Timeout != time.Second*10 {
- t.Fatalf("error setting client timeout using env variable")
- }
-
- // Setting custom client timeout for a new request
- client.SetClientTimeout(time.Second * 20)
- _ = client.NewRequest("PUT", "/")
- if client.config.HttpClient.Timeout != time.Second*20 {
- t.Fatalf("error setting client timeout using SetClientTimeout")
- }
-
}
type roundTripperFunc func(*http.Request) (*http.Response, error)