From 0ff76e16d29f24ad01b79df801ade32599f03603 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 21 Sep 2016 10:29:42 -0400 Subject: [PATCH] Transit and audit enhancements --- audit/format.go | 331 ++++++++++++++++ audit/format_json.go | 187 +-------- audit/format_json_test.go | 15 +- audit/format_jsonx.go | 51 +++ audit/format_jsonx_test.go | 59 +++ audit/formatter.go | 14 +- builtin/audit/file/backend.go | 146 ++----- builtin/audit/syslog/backend.go | 150 ++----- builtin/logical/transit/backend.go | 5 + builtin/logical/transit/backend_test.go | 26 +- builtin/logical/transit/lock_manager.go | 103 +++-- builtin/logical/transit/path_encrypt.go | 49 ++- builtin/logical/transit/path_hash.go | 116 ++++++ builtin/logical/transit/path_hash_test.go | 87 ++++ builtin/logical/transit/path_hmac.go | 211 ++++++++++ builtin/logical/transit/path_hmac_test.go | 183 +++++++++ builtin/logical/transit/path_keys.go | 58 ++- builtin/logical/transit/path_random.go | 97 +++++ builtin/logical/transit/path_random_test.go | 98 +++++ builtin/logical/transit/path_sign_verify.go | 258 ++++++++++++ .../logical/transit/path_sign_verify_test.go | 201 ++++++++++ builtin/logical/transit/policy.go | 312 +++++++++++++-- builtin/logical/transit/policy_test.go | 33 +- helper/salt/salt.go | 18 +- vendor/github.com/jefferai/jsonx/LICENSE | 373 ++++++++++++++++++ vendor/github.com/jefferai/jsonx/README.md | 12 + vendor/github.com/jefferai/jsonx/jsonx.go | 132 +++++++ vendor/vendor.json | 6 + website/source/docs/audit/file.html.md | 10 +- website/source/docs/audit/syslog.html.md | 10 +- .../source/docs/secrets/transit/index.html.md | 365 +++++++++++++++-- 31 files changed, 3161 insertions(+), 555 deletions(-) create mode 100644 audit/format.go create mode 100644 audit/format_jsonx.go create mode 100644 audit/format_jsonx_test.go create mode 100644 builtin/logical/transit/path_hash.go create mode 100644 builtin/logical/transit/path_hash_test.go create mode 100644 builtin/logical/transit/path_hmac.go create mode 100644 builtin/logical/transit/path_hmac_test.go create mode 100644 builtin/logical/transit/path_random.go create mode 100644 builtin/logical/transit/path_random_test.go create mode 100644 builtin/logical/transit/path_sign_verify.go create mode 100644 builtin/logical/transit/path_sign_verify_test.go create mode 100644 vendor/github.com/jefferai/jsonx/LICENSE create mode 100644 vendor/github.com/jefferai/jsonx/README.md create mode 100644 vendor/github.com/jefferai/jsonx/jsonx.go diff --git a/audit/format.go b/audit/format.go new file mode 100644 index 000000000..7eab08702 --- /dev/null +++ b/audit/format.go @@ -0,0 +1,331 @@ +package audit + +import ( + "fmt" + "io" + "time" + + "github.com/hashicorp/vault/logical" + "github.com/mitchellh/copystructure" +) + +type AuditFormatWriter interface { + WriteRequest(io.Writer, *AuditRequestEntry) error + WriteResponse(io.Writer, *AuditResponseEntry) error +} + +// AuditFormatter implements the Formatter interface, and allows the underlying +// marshaller to be swapped out +type AuditFormatter struct { + AuditFormatWriter +} + +func (f *AuditFormatter) FormatRequest( + w io.Writer, + config FormatterConfig, + auth *logical.Auth, + req *logical.Request, + err error) error { + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + if !config.Raw { + // Before we copy the structure we must nil out some data + // otherwise we will cause reflection to panic and die + if 
req.Connection != nil && req.Connection.ConnState != nil { + origReq := req + origState := req.Connection.ConnState + req.Connection.ConnState = nil + defer func() { + origReq.Connection.ConnState = origState + }() + } + + // Copy the structures + cp, err := copystructure.Copy(auth) + if err != nil { + return err + } + auth = cp.(*logical.Auth) + + cp, err = copystructure.Copy(req) + if err != nil { + return err + } + req = cp.(*logical.Request) + + // Hash any sensitive information + if err := Hash(config.Salt, auth); err != nil { + return err + } + if err := Hash(config.Salt, req); err != nil { + return err + } + } + + // If auth is nil, make an empty one + if auth == nil { + auth = new(logical.Auth) + } + var errString string + if err != nil { + errString = err.Error() + } + + reqEntry := &AuditRequestEntry{ + Type: "request", + Error: errString, + + Auth: AuditAuth{ + DisplayName: auth.DisplayName, + Policies: auth.Policies, + Metadata: auth.Metadata, + }, + + Request: AuditRequest{ + ID: req.ID, + ClientToken: req.ClientToken, + Operation: req.Operation, + Path: req.Path, + Data: req.Data, + RemoteAddr: getRemoteAddr(req), + WrapTTL: int(req.WrapTTL / time.Second), + }, + } + + if !config.OmitTime { + reqEntry.Time = time.Now().UTC().Format(time.RFC3339) + } + + return f.AuditFormatWriter.WriteRequest(w, reqEntry) +} + +func (f *AuditFormatter) FormatResponse( + w io.Writer, + config FormatterConfig, + auth *logical.Auth, + req *logical.Request, + resp *logical.Response, + err error) error { + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + if !config.Raw { + // Before we copy the structure we must nil out some data + // otherwise we will cause reflection to panic and die + if req.Connection != nil && req.Connection.ConnState != nil { + origReq := req + origState := req.Connection.ConnState + req.Connection.ConnState = nil + defer func() { + origReq.Connection.ConnState = origState + }() + } + + // Copy the structure + cp, err := copystructure.Copy(auth) + if err != nil { + return err + } + auth = cp.(*logical.Auth) + + cp, err = copystructure.Copy(req) + if err != nil { + return err + } + req = cp.(*logical.Request) + + cp, err = copystructure.Copy(resp) + if err != nil { + return err + } + resp = cp.(*logical.Response) + + // Hash any sensitive information + + // Cache and restore accessor in the auth + var accessor, wrappedAccessor string + if !config.HMACAccessor && auth != nil && auth.Accessor != "" { + accessor = auth.Accessor + } + if err := Hash(config.Salt, auth); err != nil { + return err + } + if accessor != "" { + auth.Accessor = accessor + } + + if err := Hash(config.Salt, req); err != nil { + return err + } + + // Cache and restore accessor in the response + accessor = "" + if !config.HMACAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" { + accessor = resp.Auth.Accessor + } + if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" { + wrappedAccessor = resp.WrapInfo.WrappedAccessor + } + if err := Hash(config.Salt, resp); err != nil { + return err + } + if accessor != "" { + resp.Auth.Accessor = accessor + } + if wrappedAccessor != "" { + resp.WrapInfo.WrappedAccessor = wrappedAccessor + } + } + + // If things are nil, make empty to avoid panics + if auth == nil { + auth = new(logical.Auth) + } + if resp == nil { + resp = new(logical.Response) + } + var errString string + if err != 
nil { + errString = err.Error() + } + + var respAuth *AuditAuth + if resp.Auth != nil { + respAuth = &AuditAuth{ + ClientToken: resp.Auth.ClientToken, + Accessor: resp.Auth.Accessor, + DisplayName: resp.Auth.DisplayName, + Policies: resp.Auth.Policies, + Metadata: resp.Auth.Metadata, + } + } + + var respSecret *AuditSecret + if resp.Secret != nil { + respSecret = &AuditSecret{ + LeaseID: resp.Secret.LeaseID, + } + } + + var respWrapInfo *AuditWrapInfo + if resp.WrapInfo != nil { + respWrapInfo = &AuditWrapInfo{ + TTL: int(resp.WrapInfo.TTL / time.Second), + Token: resp.WrapInfo.Token, + CreationTime: resp.WrapInfo.CreationTime, + WrappedAccessor: resp.WrapInfo.WrappedAccessor, + } + } + + respEntry := &AuditResponseEntry{ + Type: "response", + Error: errString, + + Auth: AuditAuth{ + DisplayName: auth.DisplayName, + Policies: auth.Policies, + Metadata: auth.Metadata, + }, + + Request: AuditRequest{ + ID: req.ID, + ClientToken: req.ClientToken, + Operation: req.Operation, + Path: req.Path, + Data: req.Data, + RemoteAddr: getRemoteAddr(req), + WrapTTL: int(req.WrapTTL / time.Second), + }, + + Response: AuditResponse{ + Auth: respAuth, + Secret: respSecret, + Data: resp.Data, + Redirect: resp.Redirect, + WrapInfo: respWrapInfo, + }, + } + + if !config.OmitTime { + respEntry.Time = time.Now().UTC().Format(time.RFC3339) + } + + return f.AuditFormatWriter.WriteResponse(w, respEntry) +} + +// AuditRequest is the structure of a request audit log entry in Audit. +type AuditRequestEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type"` + Auth AuditAuth `json:"auth"` + Request AuditRequest `json:"request"` + Error string `json:"error"` +} + +// AuditResponseEntry is the structure of a response audit log entry in Audit. +type AuditResponseEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type"` + Error string `json:"error"` + Auth AuditAuth `json:"auth"` + Request AuditRequest `json:"request"` + Response AuditResponse `json:"response"` +} + +type AuditRequest struct { + ID string `json:"id"` + Operation logical.Operation `json:"operation"` + ClientToken string `json:"client_token"` + Path string `json:"path"` + Data map[string]interface{} `json:"data"` + RemoteAddr string `json:"remote_address"` + WrapTTL int `json:"wrap_ttl"` +} + +type AuditResponse struct { + Auth *AuditAuth `json:"auth,omitempty"` + Secret *AuditSecret `json:"secret,emitempty"` + Data map[string]interface{} `json:"data"` + Redirect string `json:"redirect"` + WrapInfo *AuditWrapInfo `json:"wrap_info,omitempty"` +} + +type AuditAuth struct { + ClientToken string `json:"client_token,omitempty"` + Accessor string `json:"accessor,omitempty"` + DisplayName string `json:"display_name"` + Policies []string `json:"policies"` + Metadata map[string]string `json:"metadata"` +} + +type AuditSecret struct { + LeaseID string `json:"lease_id"` +} + +type AuditWrapInfo struct { + TTL int `json:"ttl"` + Token string `json:"token"` + CreationTime time.Time `json:"creation_time"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +// getRemoteAddr safely gets the remote address avoiding a nil pointer +func getRemoteAddr(req *logical.Request) string { + if req != nil && req.Connection != nil { + return req.Connection.RemoteAddr + } + return "" +} diff --git a/audit/format_json.go b/audit/format_json.go index cd76f86e6..241be345c 100644 --- a/audit/format_json.go +++ b/audit/format_json.go @@ -2,195 +2,28 @@ package audit import ( "encoding/json" + "fmt" "io" - "time" - - 
"github.com/hashicorp/vault/logical" ) -// FormatJSON is a Formatter implementation that structures data into +// JSONFormatWriter is an AuditFormatWriter implementation that structures data into // a JSON format. -type FormatJSON struct{} +type JSONFormatWriter struct{} -func (f *FormatJSON) FormatRequest( - w io.Writer, - auth *logical.Auth, - req *logical.Request, - err error) error { - - // If auth is nil, make an empty one - if auth == nil { - auth = new(logical.Auth) - } - var errString string - if err != nil { - errString = err.Error() +func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { + if req == nil { + return fmt.Errorf("request entry was nil, cannot encode") } - // Encode! enc := json.NewEncoder(w) - return enc.Encode(&JSONRequestEntry{ - Time: time.Now().UTC().Format(time.RFC3339Nano), - Type: "request", - Error: errString, - - Auth: JSONAuth{ - DisplayName: auth.DisplayName, - Policies: auth.Policies, - Metadata: auth.Metadata, - }, - - Request: JSONRequest{ - ClientToken: req.ClientToken, - ID: req.ID, - Operation: req.Operation, - Path: req.Path, - Data: req.Data, - RemoteAddr: getRemoteAddr(req), - WrapTTL: int(req.WrapTTL / time.Second), - }, - }) + return enc.Encode(req) } -func (f *FormatJSON) FormatResponse( - w io.Writer, - auth *logical.Auth, - req *logical.Request, - resp *logical.Response, - err error) error { - // If things are nil, make empty to avoid panics - if auth == nil { - auth = new(logical.Auth) - } +func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { if resp == nil { - resp = new(logical.Response) - } - var errString string - if err != nil { - errString = err.Error() + return fmt.Errorf("response entry was nil, cannot encode") } - var respAuth *JSONAuth - if resp.Auth != nil { - respAuth = &JSONAuth{ - ClientToken: resp.Auth.ClientToken, - Accessor: resp.Auth.Accessor, - DisplayName: resp.Auth.DisplayName, - Policies: resp.Auth.Policies, - Metadata: resp.Auth.Metadata, - } - } - - var respSecret *JSONSecret - if resp.Secret != nil { - respSecret = &JSONSecret{ - LeaseID: resp.Secret.LeaseID, - } - } - - var respWrapInfo *JSONWrapInfo - if resp.WrapInfo != nil { - respWrapInfo = &JSONWrapInfo{ - TTL: int(resp.WrapInfo.TTL / time.Second), - Token: resp.WrapInfo.Token, - CreationTime: resp.WrapInfo.CreationTime, - WrappedAccessor: resp.WrapInfo.WrappedAccessor, - } - } - - // Encode! enc := json.NewEncoder(w) - return enc.Encode(&JSONResponseEntry{ - Time: time.Now().UTC().Format(time.RFC3339Nano), - Type: "response", - Error: errString, - - Auth: JSONAuth{ - DisplayName: auth.DisplayName, - Policies: auth.Policies, - Metadata: auth.Metadata, - }, - - Request: JSONRequest{ - ClientToken: req.ClientToken, - ID: req.ID, - Operation: req.Operation, - Path: req.Path, - Data: req.Data, - RemoteAddr: getRemoteAddr(req), - WrapTTL: int(req.WrapTTL / time.Second), - }, - - Response: JSONResponse{ - Auth: respAuth, - Secret: respSecret, - Data: resp.Data, - Redirect: resp.Redirect, - WrapInfo: respWrapInfo, - }, - }) -} - -// JSONRequest is the structure of a request audit log entry in JSON. -type JSONRequestEntry struct { - Time string `json:"time"` - Type string `json:"type"` - Auth JSONAuth `json:"auth"` - Request JSONRequest `json:"request"` - Error string `json:"error"` -} - -// JSONResponseEntry is the structure of a response audit log entry in JSON. 
-type JSONResponseEntry struct { - Time string `json:"time"` - Type string `json:"type"` - Error string `json:"error"` - Auth JSONAuth `json:"auth"` - Request JSONRequest `json:"request"` - Response JSONResponse `json:"response"` -} - -type JSONRequest struct { - ID string `json:"id"` - Operation logical.Operation `json:"operation"` - ClientToken string `json:"client_token"` - Path string `json:"path"` - Data map[string]interface{} `json:"data"` - RemoteAddr string `json:"remote_address"` - WrapTTL int `json:"wrap_ttl"` -} - -type JSONResponse struct { - Auth *JSONAuth `json:"auth,omitempty"` - Secret *JSONSecret `json:"secret,emitempty"` - Data map[string]interface{} `json:"data"` - Redirect string `json:"redirect"` - WrapInfo *JSONWrapInfo `json:"wrap_info,omitempty"` -} - -type JSONAuth struct { - ClientToken string `json:"client_token,omitempty"` - Accessor string `json:"accessor,omitempty"` - DisplayName string `json:"display_name"` - Policies []string `json:"policies"` - Metadata map[string]string `json:"metadata"` -} - -type JSONSecret struct { - LeaseID string `json:"lease_id"` -} - -type JSONWrapInfo struct { - TTL int `json:"ttl"` - Token string `json:"token"` - CreationTime time.Time `json:"creation_time"` - WrappedAccessor string `json:"wrapped_accessor,omitempty"` -} - -// getRemoteAddr safely gets the remote address avoiding a nil pointer -func getRemoteAddr(req *logical.Request) string { - if req != nil && req.Connection != nil { - return req.Connection.RemoteAddr - } - return "" + return enc.Encode(resp) } diff --git a/audit/format_json_test.go b/audit/format_json_test.go index e2e7c9488..c4cb2da54 100644 --- a/audit/format_json_test.go +++ b/audit/format_json_test.go @@ -10,6 +10,7 @@ import ( "errors" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) @@ -37,17 +38,23 @@ func TestFormatJSON_formatRequest(t *testing.T) { for name, tc := range cases { var buf bytes.Buffer - var format FormatJSON - if err := format.FormatRequest(&buf, tc.Auth, tc.Req, tc.Err); err != nil { + formatter := AuditFormatter{ + AuditFormatWriter: &JSONFormatWriter{}, + } + salter, _ := salt.NewSalt(nil, nil) + config := FormatterConfig{ + Salt: salter, + } + if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil { t.Fatalf("bad: %s\nerr: %s", name, err) } - var expectedjson = new(JSONRequestEntry) + var expectedjson = new(AuditRequestEntry) if err := jsonutil.DecodeJSON([]byte(tc.Result), &expectedjson); err != nil { t.Fatalf("bad json: %s", err) } - var actualjson = new(JSONRequestEntry) + var actualjson = new(AuditRequestEntry) if err := jsonutil.DecodeJSON([]byte(buf.String()), &actualjson); err != nil { t.Fatalf("bad json: %s", err) } diff --git a/audit/format_jsonx.go b/audit/format_jsonx.go new file mode 100644 index 000000000..b3c814f72 --- /dev/null +++ b/audit/format_jsonx.go @@ -0,0 +1,51 @@ +package audit + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/jefferai/jsonx" +) + +// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into +// a XML format. 
+type JSONxFormatWriter struct{} + +func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { + if req == nil { + return fmt.Errorf("request entry was nil, cannot encode") + } + + jsonBytes, err := json.Marshal(req) + if err != nil { + return err + } + + xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) + if err != nil { + return err + } + + _, err = w.Write(xmlBytes) + return err +} + +func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { + if resp == nil { + return fmt.Errorf("response entry was nil, cannot encode") + } + + jsonBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) + if err != nil { + return err + } + + _, err = w.Write(xmlBytes) + return err +} diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go new file mode 100644 index 000000000..2e92d01e9 --- /dev/null +++ b/audit/format_jsonx_test.go @@ -0,0 +1,59 @@ +package audit + +import ( + "bytes" + "strings" + "testing" + "time" + + "errors" + + "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/logical" +) + +func TestFormatJSONx_formatRequest(t *testing.T) { + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Result string + Expected string + }{ + "auth, request": { + &logical.Auth{ClientToken: "foo", Policies: []string{"root"}}, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapTTL: 60 * time.Second, + }, + errors.New("this is an error"), + "", + `rootthis is an errorupdate/foo127.0.0.160request`, + }, + } + + for name, tc := range cases { + var buf bytes.Buffer + formatter := AuditFormatter{ + AuditFormatWriter: &JSONxFormatWriter{}, + } + salter, _ := salt.NewSalt(nil, nil) + config := FormatterConfig{ + Salt: salter, + OmitTime: true, + } + if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil { + t.Fatalf("bad: %s\nerr: %s", name, err) + } + + if strings.TrimSpace(buf.String()) != string(tc.Expected) { + t.Fatalf( + "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'", + name, strings.TrimSpace(buf.String()), string(tc.Expected)) + } + } +} diff --git a/audit/formatter.go b/audit/formatter.go index 45f665ed4..318bd1bc5 100644 --- a/audit/formatter.go +++ b/audit/formatter.go @@ -3,6 +3,7 @@ package audit import ( "io" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) @@ -12,6 +13,15 @@ import ( // // It is recommended that you pass data through Hash prior to formatting it. 
type Formatter interface { - FormatRequest(io.Writer, *logical.Auth, *logical.Request, error) error - FormatResponse(io.Writer, *logical.Auth, *logical.Request, *logical.Response, error) error + FormatRequest(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, error) error + FormatResponse(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, *logical.Response, error) error +} + +type FormatterConfig struct { + Raw bool + Salt *salt.Salt + HMACAccessor bool + + // This should only ever be used in a testing context + OmitTime bool } diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go index 08e8c84d8..80e654630 100644 --- a/builtin/audit/file/backend.go +++ b/builtin/audit/file/backend.go @@ -8,9 +8,7 @@ import ( "sync" "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" - "github.com/mitchellh/copystructure" ) func Factory(conf *audit.BackendConfig) (audit.Backend, error) { @@ -26,6 +24,16 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } } + format, ok := conf.Config["format"] + if !ok { + format = "json" + } + switch format { + case "json", "jsonx": + default: + return nil, fmt.Errorf("unknown format type %s", format) + } + // Check if hashing of accessor is disabled hmacAccessor := true if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { @@ -47,10 +55,19 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } b := &Backend{ - path: path, - logRaw: logRaw, - hmacAccessor: hmacAccessor, - salt: conf.Salt, + path: path, + formatConfig: audit.FormatterConfig{ + Raw: logRaw, + Salt: conf.Salt, + HMACAccessor: hmacAccessor, + }, + } + + switch format { + case "json": + b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{} + case "jsonx": + b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{} } // Ensure that the file can be successfully opened for writing; @@ -69,60 +86,25 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { // It doesn't do anything more at the moment to assist with rotation // or reset the write cursor, this should be done in the future. 
type Backend struct { - path string - logRaw bool - hmacAccessor bool - salt *salt.Salt + path string + + formatter audit.AuditFormatter + formatConfig audit.FormatterConfig once sync.Once f *os.File } func (b *Backend) GetHash(data string) string { - return audit.HashString(b.salt, data) + return audit.HashString(b.formatConfig.Salt, data) } func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error { if err := b.open(); err != nil { return err } - if !b.logRaw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if req.Connection != nil && req.Connection.ConnState != nil { - origReq := req - origState := req.Connection.ConnState - req.Connection.ConnState = nil - defer func() { - origReq.Connection.ConnState = origState - }() - } - // Copy the structures - cp, err := copystructure.Copy(auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - - cp, err = copystructure.Copy(req) - if err != nil { - return err - } - req = cp.(*logical.Request) - - // Hash any sensitive information - if err := audit.Hash(b.salt, auth); err != nil { - return err - } - if err := audit.Hash(b.salt, req); err != nil { - return err - } - - } - - var format audit.FormatJSON - return format.FormatRequest(b.f, auth, req, outerErr) + return b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr) } func (b *Backend) LogResponse( @@ -133,76 +115,8 @@ func (b *Backend) LogResponse( if err := b.open(); err != nil { return err } - if !b.logRaw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if req.Connection != nil && req.Connection.ConnState != nil { - origReq := req - origState := req.Connection.ConnState - req.Connection.ConnState = nil - defer func() { - origReq.Connection.ConnState = origState - }() - } - // Copy the structure - cp, err := copystructure.Copy(auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - - cp, err = copystructure.Copy(req) - if err != nil { - return err - } - req = cp.(*logical.Request) - - cp, err = copystructure.Copy(resp) - if err != nil { - return err - } - resp = cp.(*logical.Response) - - // Hash any sensitive information - - // Cache and restore accessor in the auth - var accessor, wrappedAccessor string - if !b.hmacAccessor && auth != nil && auth.Accessor != "" { - accessor = auth.Accessor - } - if err := audit.Hash(b.salt, auth); err != nil { - return err - } - if accessor != "" { - auth.Accessor = accessor - } - - if err := audit.Hash(b.salt, req); err != nil { - return err - } - - // Cache and restore accessor in the response - accessor = "" - if !b.hmacAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" { - accessor = resp.Auth.Accessor - } - if !b.hmacAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" { - wrappedAccessor = resp.WrapInfo.WrappedAccessor - } - if err := audit.Hash(b.salt, resp); err != nil { - return err - } - if accessor != "" { - resp.Auth.Accessor = accessor - } - if wrappedAccessor != "" { - resp.WrapInfo.WrappedAccessor = wrappedAccessor - } - } - - var format audit.FormatJSON - return format.FormatResponse(b.f, auth, req, resp, err) + return b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err) } func (b *Backend) open() error { diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go index c603ed836..bde7ca764 100644 --- 
a/builtin/audit/syslog/backend.go +++ b/builtin/audit/syslog/backend.go @@ -7,9 +7,7 @@ import ( "github.com/hashicorp/go-syslog" "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" - "github.com/mitchellh/copystructure" ) func Factory(conf *audit.BackendConfig) (audit.Backend, error) { @@ -29,6 +27,16 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { tag = "vault" } + format, ok := conf.Config["format"] + if !ok { + format = "json" + } + switch format { + case "json", "jsonx": + default: + return nil, fmt.Errorf("unknown format type %s", format) + } + // Check if hashing of accessor is disabled hmacAccessor := true if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { @@ -56,65 +64,39 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } b := &Backend{ - logger: logger, - logRaw: logRaw, - hmacAccessor: hmacAccessor, - salt: conf.Salt, + logger: logger, + formatConfig: audit.FormatterConfig{ + Raw: logRaw, + Salt: conf.Salt, + HMACAccessor: hmacAccessor, + }, } + + switch format { + case "json": + b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{} + case "jsonx": + b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{} + } + return b, nil } // Backend is the audit backend for the syslog-based audit store. type Backend struct { - logger gsyslog.Syslogger - logRaw bool - hmacAccessor bool - salt *salt.Salt + logger gsyslog.Syslogger + + formatter audit.AuditFormatter + formatConfig audit.FormatterConfig } func (b *Backend) GetHash(data string) string { - return audit.HashString(b.salt, data) + return audit.HashString(b.formatConfig.Salt, data) } func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error { - if !b.logRaw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if req.Connection != nil && req.Connection.ConnState != nil { - origReq := req - origState := req.Connection.ConnState - req.Connection.ConnState = nil - defer func() { - origReq.Connection.ConnState = origState - }() - } - - // Copy the structures - cp, err := copystructure.Copy(auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - - cp, err = copystructure.Copy(req) - if err != nil { - return err - } - req = cp.(*logical.Request) - - // Hash any sensitive information - if err := audit.Hash(b.salt, auth); err != nil { - return err - } - if err := audit.Hash(b.salt, req); err != nil { - return err - } - } - - // Encode the entry as JSON var buf bytes.Buffer - var format audit.FormatJSON - if err := format.FormatRequest(&buf, auth, req, outerErr); err != nil { + if err := b.formatter.FormatRequest(&buf, b.formatConfig, auth, req, outerErr); err != nil { return err } @@ -125,78 +107,8 @@ func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, resp *logical.Response, err error) error { - if !b.logRaw { - // Before we copy the structure we must nil out some data - // otherwise we will cause reflection to panic and die - if req.Connection != nil && req.Connection.ConnState != nil { - origReq := req - origState := req.Connection.ConnState - req.Connection.ConnState = nil - defer func() { - origReq.Connection.ConnState = origState - }() - } - - // Copy the structure - cp, err := copystructure.Copy(auth) - if err != nil { - return err - } - auth = cp.(*logical.Auth) - - cp, err = copystructure.Copy(req) - 
if err != nil { - return err - } - req = cp.(*logical.Request) - - cp, err = copystructure.Copy(resp) - if err != nil { - return err - } - resp = cp.(*logical.Response) - - // Hash any sensitive information - - // Cache and restore accessor in the auth - var accessor, wrappedAccessor string - if !b.hmacAccessor && auth != nil && auth.Accessor != "" { - accessor = auth.Accessor - } - if err := audit.Hash(b.salt, auth); err != nil { - return err - } - if accessor != "" { - auth.Accessor = accessor - } - - if err := audit.Hash(b.salt, req); err != nil { - return err - } - - // Cache and restore accessor in the response - accessor = "" - if !b.hmacAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" { - accessor = resp.Auth.Accessor - } - if !b.hmacAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" { - wrappedAccessor = resp.WrapInfo.WrappedAccessor - } - if err := audit.Hash(b.salt, resp); err != nil { - return err - } - if accessor != "" { - resp.Auth.Accessor = accessor - } - if wrappedAccessor != "" { - resp.WrapInfo.WrappedAccessor = wrappedAccessor - } - } - - // Encode the entry as JSON var buf bytes.Buffer - var format audit.FormatJSON - if err := format.FormatResponse(&buf, auth, req, resp, err); err != nil { + if err := b.formatter.FormatResponse(&buf, b.formatConfig, auth, req, resp, err); err != nil { return err } diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index 94d07cbd9..86bde4fe3 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -28,6 +28,11 @@ func Backend(conf *logical.BackendConfig) *backend { b.pathEncrypt(), b.pathDecrypt(), b.pathDatakey(), + b.pathRandom(), + b.pathHash(), + b.pathHMAC(), + b.pathSign(), + b.pathVerify(), }, Secrets: []*framework.Secret{}, diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index b3ed0784d..2460f7459 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -229,7 +229,7 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) Name string `mapstructure:"name"` Key []byte `mapstructure:"key"` Keys map[string]int64 `mapstructure:"keys"` - CipherMode string `mapstructure:"cipher_mode"` + Type string `mapstructure:"type"` Derived bool `mapstructure:"derived"` KDF string `mapstructure:"kdf"` DeletionAllowed bool `mapstructure:"deletion_allowed"` @@ -240,10 +240,10 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) } if d.Name != name { - return fmt.Errorf("bad: %#v", d) + return fmt.Errorf("bad name: %#v", d) } - if d.CipherMode != "aes-gcm" { - return fmt.Errorf("bad: %#v", d) + if d.Type != KeyType(keyType_AES256_GCM96).String() { + return fmt.Errorf("bad key type: %#v", d) } // Should NOT get a key back if d.Key != nil { @@ -537,9 +537,9 @@ func testAccStepDecryptDatakey(t *testing.T, name string, func TestKeyUpgrade(t *testing.T) { key, _ := uuid.GenerateRandomBytes(32) p := &policy{ - Name: "test", - Key: key, - CipherMode: "aes-gcm", + Name: "test", + Key: key, + Type: keyType_AES256_GCM96, } p.migrateKeyToKeysMap() @@ -547,7 +547,7 @@ func TestKeyUpgrade(t *testing.T) { if p.Key != nil || p.Keys == nil || len(p.Keys) != 1 || - !reflect.DeepEqual(p.Keys[1].Key, key) { + !reflect.DeepEqual(p.Keys[1].AESKey, key) { t.Errorf("bad key migration, result is %#v", p.Keys) } } @@ -558,10 +558,10 @@ func TestDerivedKeyUpgrade(t *testing.T) { context, _ := 
uuid.GenerateRandomBytes(32) p := &policy{ - Name: "test", - Key: key, - CipherMode: "aes-gcm", - Derived: true, + Name: "test", + Key: key, + Type: keyType_AES256_GCM96, + Derived: true, } p.migrateKeyToKeysMap() @@ -647,7 +647,7 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { p := &policy{ Name: "testkey", - CipherMode: "aes-gcm", + Type: keyType_AES256_GCM96, Derived: true, ConvergentEncryption: true, ConvergentVersion: ver, diff --git a/builtin/logical/transit/lock_manager.go b/builtin/logical/transit/lock_manager.go index 1529c233c..4f2164a28 100644 --- a/builtin/logical/transit/lock_manager.go +++ b/builtin/logical/transit/lock_manager.go @@ -18,6 +18,28 @@ var ( errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") ) +// policyRequest holds values used when requesting a policy. Most values are +// only used during an upsert. +type policyRequest struct { + // The storage to use + storage logical.Storage + + // The name of the policy + name string + + // The key type + keyType KeyType + + // Whether it should be derived + derived bool + + // Whether to enable convergent encryption + convergent bool + + // Whether to upsert + upsert bool +} + type lockManager struct { // A lock for each named key locks map[string]*sync.RWMutex @@ -105,58 +127,72 @@ func (lm *lockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) { // is needed (for instance, for an upgrade/migration), give up the read lock, // call again with an exclusive lock, then swap back out for a read lock. func (lm *lockManager) GetPolicyShared(storage logical.Storage, name string) (*policy, *sync.RWMutex, error) { - p, lock, _, err := lm.getPolicyCommon(storage, name, false, false, false, shared) + p, lock, _, err := lm.getPolicyCommon(policyRequest{ + storage: storage, + name: name, + }, shared) if err == nil || (err != nil && err != errNeedExclusiveLock) { return p, lock, err } // Try again while asking for an exlusive lock - p, lock, _, err = lm.getPolicyCommon(storage, name, false, false, false, exclusive) + p, lock, _, err = lm.getPolicyCommon(policyRequest{ + storage: storage, + name: name, + }, exclusive) if err != nil || p == nil || lock == nil { return p, lock, err } lock.Unlock() - p, lock, _, err = lm.getPolicyCommon(storage, name, false, false, false, shared) + p, lock, _, err = lm.getPolicyCommon(policyRequest{ + storage: storage, + name: name, + }, shared) return p, lock, err } // Get the policy with an exclusive lock func (lm *lockManager) GetPolicyExclusive(storage logical.Storage, name string) (*policy, *sync.RWMutex, error) { - p, lock, _, err := lm.getPolicyCommon(storage, name, false, false, false, exclusive) + p, lock, _, err := lm.getPolicyCommon(policyRequest{ + storage: storage, + name: name, + }, exclusive) return p, lock, err } // Get the policy with a read lock; if it returns that an exclusive lock is // needed, retry. If successful, call one more time to get a read lock and // return the value. 
-func (lm *lockManager) GetPolicyUpsert(storage logical.Storage, name string, derived, convergent bool) (*policy, *sync.RWMutex, bool, error) { - p, lock, _, err := lm.getPolicyCommon(storage, name, true, derived, convergent, shared) +func (lm *lockManager) GetPolicyUpsert(req policyRequest) (*policy, *sync.RWMutex, bool, error) { + req.upsert = true + + p, lock, _, err := lm.getPolicyCommon(req, shared) if err == nil || (err != nil && err != errNeedExclusiveLock) { return p, lock, false, err } // Try again while asking for an exlusive lock - p, lock, upserted, err := lm.getPolicyCommon(storage, name, true, derived, convergent, exclusive) + p, lock, upserted, err := lm.getPolicyCommon(req, exclusive) if err != nil || p == nil || lock == nil { return p, lock, upserted, err } - lock.Unlock() - // Now get a shared lock for the return, but preserve the value of upsert - p, lock, _, err = lm.getPolicyCommon(storage, name, true, derived, convergent, shared) + req.upsert = false + // Now get a shared lock for the return, but preserve the value of upserted + p, lock, _, err = lm.getPolicyCommon(req, shared) return p, lock, upserted, err } // When the function returns, a lock will be held on the policy if err == nil. // It is the caller's responsibility to unlock. -func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, upsert, derived, convergent, lockType bool) (*policy, *sync.RWMutex, bool, error) { - lock := lm.policyLock(name, lockType) +func (lm *lockManager) getPolicyCommon(req policyRequest, lockType bool) (*policy, *sync.RWMutex, bool, error) { + lock := lm.policyLock(req.name, lockType) var p *policy var err error @@ -164,7 +200,7 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups // Check if it's in our cache. If so, return right away. 
if lm.CacheActive() { lm.cacheMutex.RLock() - p = lm.cache[name] + p = lm.cache[req.name] if p != nil { lm.cacheMutex.RUnlock() return p, lock, false, nil @@ -173,7 +209,7 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups } // Load it from storage - p, err = lm.getStoredPolicy(storage, name) + p, err = lm.getStoredPolicy(req.storage, req.name) if err != nil { lm.UnlockPolicy(lock, lockType) return nil, nil, false, err @@ -182,7 +218,7 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups if p == nil { // This is the only place we upsert a new policy, so if upsert is not // specified, or the lock type is wrong, unlock before returning - if !upsert { + if !req.upsert { lm.UnlockPolicy(lock, lockType) return nil, nil, false, nil } @@ -192,22 +228,33 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups return nil, nil, false, errNeedExclusiveLock } - if !derived && convergent { - return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled") + switch req.keyType { + case keyType_AES256_GCM96: + if req.convergent && !req.derived { + return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled") + } + + case keyType_ECDSA_P256: + if req.derived || req.convergent { + return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %s", keyType_ECDSA_P256) + } + + default: + return nil, nil, false, fmt.Errorf("unsupported key type %v", req.keyType) } p = &policy{ - Name: name, - CipherMode: "aes-gcm", - Derived: derived, + Name: req.name, + Type: req.keyType, + Derived: req.derived, } - if derived { + if req.derived { p.KDF = kdf_hkdf_sha256 - p.ConvergentEncryption = convergent + p.ConvergentEncryption = req.convergent p.ConvergentVersion = 2 } - err = p.rotate(storage) + err = p.rotate(req.storage) if err != nil { lm.UnlockPolicy(lock, lockType) return nil, nil, false, err @@ -220,12 +267,12 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups defer lm.cacheMutex.Unlock() // Make sure a policy didn't appear. If so, it will only be set if // there was no error, so assume it's good and return that - exp := lm.cache[name] + exp := lm.cache[req.name] if exp != nil { return exp, lock, false, nil } if err == nil { - lm.cache[name] = p + lm.cache[req.name] = p } } @@ -239,7 +286,7 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups return nil, nil, false, errNeedExclusiveLock } - err = p.upgrade(storage) + err = p.upgrade(req.storage) if err != nil { lm.UnlockPolicy(lock, lockType) return nil, nil, false, err @@ -253,12 +300,12 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups defer lm.cacheMutex.Unlock() // Make sure a policy didn't appear. 
If so, it will only be set if // there was no error, so assume it's good and return that - exp := lm.cache[name] + exp := lm.cache[req.name] if exp != nil { return exp, lock, false, nil } if err == nil { - lm.cache[name] = p + lm.cache[req.name] = p } } diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index e1224ecd6..be814091f 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -33,6 +33,30 @@ func (b *backend) pathEncrypt() *framework.Path { Type: framework.TypeString, Description: "Nonce for when convergent encryption is used", }, + + "type": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "aes256-gcm96", + Description: `When performing an upsert operation, the type of key +to create. Currently, "aes256-gcm96" (symmetric) is the +only type supported. Defaults to "aes256-gcm96".`, + }, + + "convergent_encryption": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Whether to support convergent encryption. +This is only supported when using a key with +key derivation enabled and will require all +requests to carry both a context and 96-bit +(12-byte) nonce. The given nonce will be used +in place of a randomly generated nonce. As a +result, when the same context and nonce are +supplied, the same ciphertext is generated. It +is *very important* when using this mode that +you ensure that all nonces are unique for a +given context. Failing to do so will severely +impact the ciphertext's security.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -96,7 +120,30 @@ func (b *backend) pathEncryptWrite( var lock *sync.RWMutex var upserted bool if req.Operation == logical.CreateOperation { - p, lock, upserted, err = b.lm.GetPolicyUpsert(req.Storage, name, len(context) != 0, false) + convergent := d.Get("convergent_encryption").(bool) + if convergent && len(context) == 0 { + return logical.ErrorResponse("convergent encryption requires derivation to be enabled, so context is required"), nil + } + + polReq := policyRequest{ + storage: req.Storage, + name: name, + derived: len(context) != 0, + convergent: convergent, + } + + keyType := d.Get("type").(string) + switch keyType { + case "aes256-gcm96": + polReq.keyType = keyType_AES256_GCM96 + case "ecdsa-p256": + return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest + default: + return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest + } + + p, lock, upserted, err = b.lm.GetPolicyUpsert(polReq) + } else { p, lock, err = b.lm.GetPolicyShared(req.Storage, name) } diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go new file mode 100644 index 000000000..566ac52bb --- /dev/null +++ b/builtin/logical/transit/path_hash.go @@ -0,0 +1,116 @@ +package transit + +import ( + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func (b *backend) pathHash() *framework.Path { + return &framework.Path{ + Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), + Fields: map[string]*framework.FieldSchema{ + "input": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "sha2-256", + Description: 
`Algorithm to use (POST body parameter). Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 + +Defaults to "sha2-256".`, + }, + + "urlalgorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Algorithm to use (POST URL parameter)`, + }, + + "format": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "hex", + Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathHashWrite, + }, + + HelpSynopsis: pathHashHelpSyn, + HelpDescription: pathHashHelpDesc, + } +} + +func (b *backend) pathHashWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + inputB64 := d.Get("input").(string) + format := d.Get("format").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + switch format { + case "hex": + case "base64": + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + retBytes := hf.Sum(nil) + + var retStr string + switch format { + case "hex": + retStr = hex.EncodeToString(retBytes) + case "base64": + retStr = base64.StdEncoding.EncodeToString(retBytes) + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "sum": retStr, + }, + } + return resp, nil +} + +const pathHashHelpSyn = `Generate a hash sum for input data` + +const pathHashHelpDesc = ` +Generates a hash sum of the given algorithm against the given input data. 
+` diff --git a/builtin/logical/transit/path_hash_test.go b/builtin/logical/transit/path_hash_test.go new file mode 100644 index 000000000..d59976df7 --- /dev/null +++ b/builtin/logical/transit/path_hash_test.go @@ -0,0 +1,87 @@ +package transit + +import ( + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestTransit_Hash(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "hash", + Data: map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + }, + } + + doRequest := func(req *logical.Request, errExpected bool, expected string) { + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + sum, ok := resp.Data["sum"] + if !ok { + t.Fatal("no sum key found in returned data") + } + if sum.(string) != expected { + t.Fatal("mismatched hashes") + } + } + + // Test defaults -- sha2-256 + doRequest(req, false, "9ecb36561341d18eb65484e833efea61edc74b84cf5e6ae1b81c63533e25fc8f") + + // Test algorithm selection in the path + req.Path = "hash/sha2-224" + doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865") + + // Reset and test algorithm selection in the data + req.Path = "hash" + req.Data["algorithm"] = "sha2-224" + doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865") + + req.Data["algorithm"] = "sha2-384" + doRequest(req, false, "15af9ec8be783f25c583626e9491dbf129dd6dd620466fdf05b3a1d0bb8381d30f4d3ec29f923ff1e09a0f6b337365a6") + + req.Data["algorithm"] = "sha2-512" + doRequest(req, false, "d9d380f29b97ad6a1d92e987d83fa5a02653301e1006dd2bcd51afa59a9147e9caedaf89521abc0f0b682adcd47fb512b8343c834a32f326fe9bef00542ce887") + + // Test returning as base64 + req.Data["format"] = "base64" + doRequest(req, false, "2dOA8puXrWodkumH2D+loCZTMB4QBt0rzVGvpZqRR+nK7a+JUhq8DwtoKtzUf7USuDQ8g0oy8yb+m+8AVCzohw==") + + // Test bad input/format/algorithm + req.Data["format"] = "base92" + doRequest(req, true, "") + + req.Data["format"] = "hex" + req.Data["algorithm"] = "foobar" + doRequest(req, true, "") + + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + doRequest(req, true, "") +} diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go new file mode 100644 index 000000000..31c156f0b --- /dev/null +++ b/builtin/logical/transit/path_hmac.go @@ -0,0 +1,211 @@ +package transit + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func (b *backend) pathHMAC() *framework.Path { + return &framework.Path{ + Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The key to use for the HMAC function", + }, + + "input": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": 
&framework.FieldSchema{ + Type: framework.TypeString, + Default: "sha2-256", + Description: `Algorithm to use (POST body parameter). Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 + +Defaults to "sha2-256".`, + }, + + "urlalgorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Algorithm to use (POST URL parameter)`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathHMACWrite, + }, + + HelpSynopsis: pathHMACHelpSyn, + HelpDescription: pathHMACHelpDesc, + } +} + +func (b *backend) pathHMACWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + inputB64 := d.Get("input").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + // Get the policy + p, lock, err := b.lm.GetPolicyShared(req.Storage, name) + if lock != nil { + defer lock.RUnlock() + } + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + } + + key, err := p.HMACKey(p.LatestVersion) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if key == nil { + return nil, fmt.Errorf("HMAC key value could not be computed") + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = hmac.New(sha256.New224, key) + case "sha2-256": + hf = hmac.New(sha256.New, key) + case "sha2-384": + hf = hmac.New(sha512.New384, key) + case "sha2-512": + hf = hmac.New(sha512.New, key) + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + retBytes := hf.Sum(nil) + + retStr := base64.StdEncoding.EncodeToString(retBytes) + retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(p.LatestVersion), retStr) + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "hmac": retStr, + }, + } + return resp, nil +} + +func (b *backend) pathHMACVerify( + req *logical.Request, d *framework.FieldData, verificationHMAC string) (*logical.Response, error) { + + name := d.Get("name").(string) + inputB64 := d.Get("input").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + // Verify the prefix + if !strings.HasPrefix(verificationHMAC, "vault:v") { + return logical.ErrorResponse("invalid HMAC to verify: no prefix"), logical.ErrInvalidRequest + } + + splitVerificationHMAC := strings.SplitN(strings.TrimPrefix(verificationHMAC, "vault:v"), ":", 2) + if len(splitVerificationHMAC) != 2 { + return logical.ErrorResponse("invalid HMAC: wrong number of fields"), logical.ErrInvalidRequest + } + + ver, err := strconv.Atoi(splitVerificationHMAC[0]) + if err != nil { + return logical.ErrorResponse("invalid HMAC: version number could not be decoded"), logical.ErrInvalidRequest + } + + verBytes, err := base64.StdEncoding.DecodeString(splitVerificationHMAC[1]) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode verification HMAC as base64: %s", 
err)), logical.ErrInvalidRequest + } + + // Get the policy + p, lock, err := b.lm.GetPolicyShared(req.Storage, name) + if lock != nil { + defer lock.RUnlock() + } + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + } + + if ver > p.LatestVersion { + return logical.ErrorResponse("invalid HMAC: version is too new"), logical.ErrInvalidRequest + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + return logical.ErrorResponse("cannot verify HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest + } + + key, err := p.HMACKey(ver) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if key == nil { + return nil, fmt.Errorf("HMAC key value could not be computed") + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = hmac.New(sha256.New224, key) + case "sha2-256": + hf = hmac.New(sha256.New, key) + case "sha2-384": + hf = hmac.New(sha512.New384, key) + case "sha2-512": + hf = hmac.New(sha512.New, key) + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + retBytes := hf.Sum(nil) + + return &logical.Response{ + Data: map[string]interface{}{ + "valid": hmac.Equal(retBytes, verBytes), + }, + }, nil +} + +const pathHMACHelpSyn = `Generate an HMAC for input data using the named key` + +const pathHMACHelpDesc = ` +Generates an HMAC sum of the given algorithm and key against the given input data. +` diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go new file mode 100644 index 000000000..fc6b622ec --- /dev/null +++ b/builtin/logical/transit/path_hmac_test.go @@ -0,0 +1,183 @@ +package transit + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestTransit_HMAC(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + _, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + // Now, change the key value to something we control + p, lock, err := b.lm.GetPolicyShared(storage, "foo") + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + lock.RUnlock() + keyEntry := p.Keys[p.LatestVersion] + keyEntry.HMACKey = []byte("01234567890123456789012345678901") + p.Keys[p.LatestVersion] = keyEntry + if err = p.Persist(storage); err != nil { + t.Fatal(err) + } + + req.Path = "hmac/foo" + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + } + + doRequest := func(req *logical.Request, errExpected bool, expected string) { + path := req.Path + defer func() { req.Path = path }() + + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + panic(fmt.Sprintf("%v", err)) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["hmac"] + if !ok { + t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data) + } + if value.(string) != expected { + panic(fmt.Sprintf("mismatched hashes; expected %s, 
got resp data %#v", expected, resp.Data)) + } + + // Now verify + req.Path = strings.Replace(req.Path, "hmac", "verify", -1) + req.Data["hmac"] = value.(string) + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) + } + } + + // Comparisons are against values generated via openssl + + // Test defaults -- sha2-256 + doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=") + + // Test algorithm selection in the path + req.Path = "hmac/foo/sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + + // Reset and test algorithm selection in the data + req.Path = "hmac/foo" + req.Data["algorithm"] = "sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + + req.Data["algorithm"] = "sha2-384" + doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p") + + req.Data["algorithm"] = "sha2-512" + doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + + // Test returning as base64 + req.Data["format"] = "base64" + doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + + req.Data["algorithm"] = "foobar" + doRequest(req, true, "") + + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + doRequest(req, true, "") + req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" + + // Rotate + err = p.rotate(storage) + if err != nil { + t.Fatal(err) + } + keyEntry = p.Keys[2] + // Set to another value we control + keyEntry.HMACKey = []byte("12345678901234567890123456789012") + p.Keys[2] = keyEntry + if err = p.Persist(storage); err != nil { + t.Fatal(err) + } + + doRequest(req, false, "vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=") + + // Verify a previous version + req.Path = "verify/foo" + + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } + + // Try a bad value + req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) { + t.Fatalf("expected error validating hmac") + } + + // Set min decryption version, attempt to verify + p.MinDecryptionVersion = 2 + if err = p.Persist(storage); err != nil { + t.Fatal(err) + } + + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(req) + if err == nil { + t.Fatalf("expected an error, got response %#v", resp) + } + if err != logical.ErrInvalidRequest { + t.Fatalf("expected invalid request error, got %v", err) + } +} diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index 7c68c6bab..a64a7e4a4 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -1,6 +1,7 @@ package transit import ( + "crypto/elliptic" "fmt" "strconv" @@ -17,10 +18,19 @@ func (b *backend) 
pathKeys() *framework.Path { Description: "Name of the key", }, + "type": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "aes256-gcm96", + Description: `The type of key to create. Currently, +"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric) are +supported. Defaults to "aes256-gcm96".`, + }, + "derived": &framework.FieldSchema{ Type: framework.TypeBool, Description: `Enables key derivation mode. This -allows for per-transaction unique keys.`, +allows for per-transaction unique +keys for encryption operations.`, }, "convergent_encryption": &framework.FieldSchema{ @@ -56,12 +66,28 @@ func (b *backend) pathPolicyWrite( name := d.Get("name").(string) derived := d.Get("derived").(bool) convergent := d.Get("convergent_encryption").(bool) + keyType := d.Get("type").(string) if !derived && convergent { return logical.ErrorResponse("convergent encryption requires derivation to be enabled"), nil } - p, lock, upserted, err := b.lm.GetPolicyUpsert(req.Storage, name, derived, convergent) + polReq := policyRequest{ + storage: req.Storage, + name: name, + derived: derived, + convergent: convergent, + } + switch keyType { + case "aes256-gcm96": + polReq.keyType = keyType_AES256_GCM96 + case "ecdsa-p256": + polReq.keyType = keyType_ECDSA_P256 + default: + return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest + } + + p, lock, upserted, err := b.lm.GetPolicyUpsert(polReq) if lock != nil { defer lock.RUnlock() } @@ -99,13 +125,14 @@ func (b *backend) pathPolicyRead( resp := &logical.Response{ Data: map[string]interface{}{ "name": p.Name, - "cipher_mode": p.CipherMode, + "type": p.Type.String(), "derived": p.Derived, "deletion_allowed": p.DeletionAllowed, "min_decryption_version": p.MinDecryptionVersion, "latest_version": p.LatestVersion, }, } + if p.Derived { switch p.KDF { case kdf_hmac_sha256_counter: @@ -120,11 +147,28 @@ func (b *backend) pathPolicyRead( } } - retKeys := map[string]int64{} - for k, v := range p.Keys { - retKeys[strconv.Itoa(k)] = v.CreationTime + switch p.Type { + case keyType_AES256_GCM96: + retKeys := map[string]int64{} + for k, v := range p.Keys { + retKeys[strconv.Itoa(k)] = v.CreationTime + } + resp.Data["keys"] = retKeys + + case keyType_ECDSA_P256: + type ecdsaKey struct { + Name string `json:"name"` + PublicKey string `json:"public_key"` + } + retKeys := map[string]ecdsaKey{} + for k, v := range p.Keys { + retKeys[strconv.Itoa(k)] = ecdsaKey{ + Name: elliptic.P256().Params().Name, + PublicKey: v.FormattedPublicKey, + } + } + resp.Data["keys"] = retKeys } - resp.Data["keys"] = retKeys return resp, nil } diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go new file mode 100644 index 000000000..f9190b7df --- /dev/null +++ b/builtin/logical/transit/path_random.go @@ -0,0 +1,97 @@ +package transit + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + "strconv" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func (b *backend) pathRandom() *framework.Path { + return &framework.Path{ + Pattern: "random" + framework.OptionalParamRegex("urlbytes"), + Fields: map[string]*framework.FieldSchema{ + "urlbytes": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The number of bytes to generate (POST URL parameter)", + }, + + "bytes": &framework.FieldSchema{ + Type: framework.TypeInt, + Default: 32, + Description: "The number of bytes to generate (POST body parameter). 
Defaults to 32 (256 bits).", + }, + + "format": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "base64", + Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRandomWrite, + }, + + HelpSynopsis: pathRandomHelpSyn, + HelpDescription: pathRandomHelpDesc, + } +} + +func (b *backend) pathRandomWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + bytes := 0 + var err error + strBytes := d.Get("urlbytes").(string) + if strBytes != "" { + bytes, err = strconv.Atoi(strBytes) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil + } + } else { + bytes = d.Get("bytes").(int) + } + format := d.Get("format").(string) + + if bytes < 1 { + return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil + } + + switch format { + case "hex": + case "base64": + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil + } + + randBytes, err := uuid.GenerateRandomBytes(bytes) + if err != nil { + return nil, err + } + + var retStr string + switch format { + case "hex": + retStr = hex.EncodeToString(randBytes) + case "base64": + retStr = base64.StdEncoding.EncodeToString(randBytes) + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "random_bytes": retStr, + }, + } + return resp, nil +} + +const pathRandomHelpSyn = `Generate random bytes` + +const pathRandomHelpDesc = ` +This function can be used to generate high-entropy random bytes. +` diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go new file mode 100644 index 000000000..a2711ce0d --- /dev/null +++ b/builtin/logical/transit/path_random_test.go @@ -0,0 +1,98 @@ +package transit + +import ( + "encoding/base64" + "encoding/hex" + "reflect" + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestTransit_Random(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "random", + Data: map[string]interface{}{}, + } + + doRequest := func(req *logical.Request, errExpected bool, format string, numBytes int) { + getResponse := func() []byte { + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return nil + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + if _, ok := resp.Data["random_bytes"]; !ok { + t.Fatal("no random_bytes found in response") + } + + outputStr := resp.Data["random_bytes"].(string) + var outputBytes []byte + switch format { + case "base64": + outputBytes, err = base64.StdEncoding.DecodeString(outputStr) + case "hex": + outputBytes, err = hex.DecodeString(outputStr) + default: + t.Fatal("unknown format") + } + if err != nil { + t.Fatal(err) + } + + return outputBytes + } + + rand1 := getResponse() + // Expected error + if rand1 == nil { + return + } + rand2 := getResponse() + if len(rand1) != numBytes || len(rand2) != numBytes { + t.Fatal("length of output random bytes 
not what is expected") + } + if reflect.DeepEqual(rand1, rand2) { + t.Fatal("found identical outputs") + } + } + + // Test defaults + doRequest(req, false, "base64", 32) + + // Test size selection in the path + req.Path = "random/24" + req.Data["format"] = "hex" + doRequest(req, false, "hex", 24) + + // Test bad input/format + req.Path = "random" + req.Data["format"] = "base92" + doRequest(req, true, "", 0) + + req.Data["format"] = "hex" + req.Data["bytes"] = -1 + doRequest(req, true, "", 0) +} diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go new file mode 100644 index 000000000..ff018808e --- /dev/null +++ b/builtin/logical/transit/path_sign_verify.go @@ -0,0 +1,258 @@ +package transit + +import ( + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "fmt" + "hash" + + "github.com/hashicorp/vault/helper/errutil" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func (b *backend) pathSign() *framework.Path { + return &framework.Path{ + Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The key to use", + }, + + "input": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "sha2-256", + Description: `Hash algorithm to use (POST body parameter). Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 + +Defaults to "sha2-256".`, + }, + + "urlalgorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Hash algorithm to use (POST URL parameter)`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathSignWrite, + }, + + HelpSynopsis: pathSignHelpSyn, + HelpDescription: pathSignHelpDesc, + } +} + +func (b *backend) pathVerify() *framework.Path { + return &framework.Path{ + Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The key to use", + }, + + "signature": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The signature, including vault header/key version", + }, + + "hmac": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The HMAC, including vault header/key version", + }, + + "input": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "The base64-encoded input data to verify", + }, + + "urlalgorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Hash algorithm to use (POST URL parameter)`, + }, + + "algorithm": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "sha2-256", + Description: `Hash algorithm to use (POST body parameter). 
Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 + +Defaults to "sha2-256".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathVerifyWrite, + }, + + HelpSynopsis: pathVerifyHelpSyn, + HelpDescription: pathVerifyHelpDesc, + } +} + +func (b *backend) pathSignWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + inputB64 := d.Get("input").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + hashedInput := hf.Sum(nil) + + // Get the policy + p, lock, err := b.lm.GetPolicyShared(req.Storage, name) + if lock != nil { + defer lock.RUnlock() + } + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + } + + if !p.Type.SigningSupported() { + return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest + } + + sig, err := p.Sign(hashedInput) + if err != nil { + return nil, err + } + if sig == "" { + return nil, fmt.Errorf("signature could not be computed") + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "signature": sig, + }, + } + return resp, nil +} + +func (b *backend) pathVerifyWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + + sig := d.Get("signature").(string) + hmac := d.Get("hmac").(string) + switch { + case sig != "" && hmac != "": + return logical.ErrorResponse("provide one of 'signature' or 'hmac'"), logical.ErrInvalidRequest + + case sig == "" && hmac == "": + return logical.ErrorResponse("neither a 'signature' nor an 'hmac' were given to verify"), logical.ErrInvalidRequest + + case hmac != "": + return b.pathHMACVerify(req, d, hmac) + } + + name := d.Get("name").(string) + inputB64 := d.Get("input").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + hashedInput := hf.Sum(nil) + + // Get the policy + p, lock, err := b.lm.GetPolicyShared(req.Storage, name) + if lock != nil { + defer lock.RUnlock() + } + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + } + + valid, err := p.VerifySignature(hashedInput, sig) + if err != nil { + switch err.(type) { + case errutil.UserError: + return 
logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + case errutil.InternalError: + return nil, err + default: + return nil, err + } + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "valid": valid, + }, + } + return resp, nil +} + +const pathSignHelpSyn = `Generate a signature for input data using the named key` + +const pathSignHelpDesc = ` +Generates a signature of the input data using the named key and the given hash algorithm. +` +const pathVerifyHelpSyn = `Verify a signature or HMAC for input data created using the named key` + +const pathVerifyHelpDesc = ` +Verifies a signature or HMAC of the input data using the named key and the given hash algorithm. +` diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go new file mode 100644 index 000000000..511e0053b --- /dev/null +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -0,0 +1,201 @@ +package transit + +import ( + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestTransit_SignVerify(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": "ecdsa-p256", + }, + } + _, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + // Now, change the key value to something we control + p, lock, err := b.lm.GetPolicyShared(storage, "foo") + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + lock.RUnlock() + + // Useful code to output a key for openssl verification + /* + { + key := p.Keys[p.LatestVersion] + keyBytes, _ := x509.MarshalECPrivateKey(&ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: key.X, + Y: key.Y, + }, + D: key.D, + }) + pemBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: keyBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + t.Fatalf("X: %s, Y: %s, D: %s, marshaled: %s", key.X.Text(16), key.Y.Text(16), key.D.Text(16), string(pemBytes)) + } + */ + + keyEntry := p.Keys[p.LatestVersion] + _, ok := keyEntry.EC_X.SetString("7336010a6da5935113d26d9ea4bb61b3b8d102c9a8083ed432f9b58fd7e80686", 16) + if !ok { + t.Fatal("could not set X") + } + _, ok = keyEntry.EC_Y.SetString("4040aa31864691a8a9e7e3ec9250e85425b797ad7be34ba8df62bfbad45ebb0e", 16) + if !ok { + t.Fatal("could not set Y") + } + _, ok = keyEntry.EC_D.SetString("99e5569be8683a2691dfc560ca9dfa71e887867a3af60635a08a3e3655aba3ef", 16) + if !ok { + t.Fatal("could not set D") + } + p.Keys[p.LatestVersion] = keyEntry + if err = p.Persist(storage); err != nil { + t.Fatal(err) + } + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + } + + signRequest := func(req *logical.Request, errExpected bool, postpath string) string { + req.Path = "sign/foo" + postpath + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return "" + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["signature"] + if !ok { + t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) + } 
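+ // Hand the signature string back to the caller so it can be passed to verifyRequest below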
+ return value.(string) + } + + verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) { + req.Path = "verify/foo" + postpath + req.Data["signature"] = sig + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatalf("got error: %v, sig was %v", err, sig) + } + if errExpected { + if resp != nil && !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["valid"] + if !ok { + t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) + } + if !value.(bool) && !errExpected { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } + } + + // Comparisons are against values generated via openssl + + // Test defaults -- sha2-256 + sig := signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + // Test a bad signature + verifyRequest(req, true, "", sig[0:len(sig)-2]) + + // Test a signature generated with the same key by openssl + sig = `vault:v1:MEUCIAgnEl9V8P305EBAlz68Nq4jZng5fE8k6MactcnlUw9dAiEAvJVePg3dazW6MaW7lRAVtEz82QJDVmR98tXCl8Pc7DA=` + verifyRequest(req, false, "", sig) + + // Test algorithm selection in the path + sig = signRequest(req, false, "/sha2-224") + verifyRequest(req, false, "/sha2-224", sig) + + // Reset and test algorithm selection in the data + req.Data["algorithm"] = "sha2-224" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["algorithm"] = "sha2-384" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + // Test 512 and save sig for later to ensure we can't validate once min + // decryption version is set + req.Data["algorithm"] = "sha2-512" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + v1sig := sig + + // Test bad algorithm + req.Data["algorithm"] = "foobar" + signRequest(req, true, "") + + // Test bad input + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + signRequest(req, true, "") + + // Rotate and set min decryption version + err = p.rotate(storage) + if err != nil { + t.Fatal(err) + } + err = p.rotate(storage) + if err != nil { + t.Fatal(err) + } + + p.MinDecryptionVersion = 2 + if err = p.Persist(storage); err != nil { + t.Fatal(err) + } + + req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" + req.Data["algorithm"] = "sha2-256" + // Make sure signing still works fine + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + // Now try the v1 + verifyRequest(req, true, "", v1sig) +} diff --git a/builtin/logical/transit/policy.go b/builtin/logical/transit/policy.go index b619c6192..79cd34d27 100644 --- a/builtin/logical/transit/policy.go +++ b/builtin/logical/transit/policy.go @@ -4,11 +4,19 @@ import ( "bytes" "crypto/aes" "crypto/cipher" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" "crypto/sha256" + "crypto/x509" + "encoding/asn1" "encoding/base64" "encoding/json" + "encoding/pem" "fmt" "io" + "math/big" "strconv" "strings" "time" @@ -22,18 +30,79 @@ import ( "github.com/hashicorp/vault/logical" ) -// Careful with iota; don't put anything before it in this const block +// Careful with iota; don't put anything before it in this const block because +// we need the default of zero to be the old-style KDF const ( kdf_hmac_sha256_counter = iota // built-in helper kdf_hkdf_sha256 // golang.org/x/crypto/hkdf ) -const ErrTooOld = 
"ciphertext version is disallowed by policy (too old)" +// Or this one...we need the default of zero to be the original AES256-GCM96 +const ( + keyType_AES256_GCM96 = iota + keyType_ECDSA_P256 +) + +const ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)" + +type ecdsaSignature struct { + R, S *big.Int +} + +type KeyType int + +func (kt KeyType) EncryptionSupported() bool { + switch kt { + case keyType_AES256_GCM96: + return true + } + return false +} + +func (kt KeyType) DecryptionSupported() bool { + switch kt { + case keyType_AES256_GCM96: + return true + } + return false +} + +func (kt KeyType) SigningSupported() bool { + switch kt { + case keyType_ECDSA_P256: + return true + } + return false +} + +func (kt KeyType) DerivationSupported() bool { + switch kt { + case keyType_AES256_GCM96: + return true + } + return false +} + +func (kt KeyType) String() string { + switch kt { + case keyType_AES256_GCM96: + return "aes256-gcm96" + case keyType_ECDSA_P256: + return "ecdsa-p256" + } + + return "[unknown]" +} // keyEntry stores the key and metadata type keyEntry struct { - Key []byte `json:"key"` - CreationTime int64 `json:"creation_time"` + AESKey []byte `json:"key"` + HMACKey []byte `json:"hmac_key"` + CreationTime int64 `json:"creation_time"` + EC_X *big.Int `json:"ec_x"` + EC_Y *big.Int `json:"ec_y"` + EC_D *big.Int `json:"ec_d"` + FormattedPublicKey string `json:"public_key"` } // keyEntryMap is used to allow JSON marshal/unmarshal @@ -67,10 +136,9 @@ func (kem keyEntryMap) UnmarshalJSON(data []byte) error { // Policy is the struct used to store metadata type policy struct { - Name string `json:"name"` - Key []byte `json:"key,omitempty"` //DEPRECATED - Keys keyEntryMap `json:"keys"` - CipherMode string `json:"cipher"` + Name string `json:"name"` + Key []byte `json:"key,omitempty"` //DEPRECATED + Keys keyEntryMap `json:"keys"` // Derived keys MUST provide a context and the master underlying key is // never used. If convergent encryption is true, the context will be used @@ -95,6 +163,9 @@ type policy struct { // The version of the convergent nonce to use ConvergentVersion int `json:"convergent_version"` + + // The type of key + Type KeyType `json:"type"` } // ArchivedKeys stores old keys. This is used to keep the key loading time sane @@ -252,7 +323,8 @@ func (p *policy) needsUpgrade() bool { return true } - // With archiving, past assumptions about the length of the keys map are no longer valid + // With archiving, past assumptions about the length of the keys map are no + // longer valid if p.LatestVersion == 0 && len(p.Keys) != 0 { return true } @@ -284,7 +356,8 @@ func (p *policy) upgrade(storage logical.Storage) error { persistNeeded = true } - // With archiving, past assumptions about the length of the keys map are no longer valid + // With archiving, past assumptions about the length of the keys map are no + // longer valid if p.LatestVersion == 0 && len(p.Keys) != 0 { p.LatestVersion = len(p.Keys) persistNeeded = true @@ -322,6 +395,10 @@ func (p *policy) upgrade(storage logical.Storage) error { // is required, otherwise the KDF mode is used with the context to derive the // proper key. 
func (p *policy) DeriveKey(context []byte, ver int) ([]byte, error) { + if !p.Type.DerivationSupported() { + return nil, errutil.UserError{Err: fmt.Sprintf("derivation not supported for key type %v", p.Type)} + } + if p.Keys == nil || p.LatestVersion == 0 { return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"} } @@ -332,7 +409,7 @@ func (p *policy) DeriveKey(context []byte, ver int) ([]byte, error) { // Fast-path non-derived keys if !p.Derived { - return p.Keys[ver].Key, nil + return p.Keys[ver].AESKey, nil } // Ensure a context is provided @@ -344,9 +421,9 @@ func (p *policy) DeriveKey(context []byte, ver int) ([]byte, error) { case kdf_hmac_sha256_counter: prf := kdf.HMACSHA256PRF prfLen := kdf.HMACSHA256PRFLen - return kdf.CounterMode(prf, prfLen, p.Keys[ver].Key, context, 256) + return kdf.CounterMode(prf, prfLen, p.Keys[ver].AESKey, context, 256) case kdf_hkdf_sha256: - reader := hkdf.New(sha256.New, p.Keys[ver].Key, nil, context) + reader := hkdf.New(sha256.New, p.Keys[ver].AESKey, nil, context) derBytes := bytes.NewBuffer(nil) derBytes.Grow(32) limReader := &io.LimitedReader{ @@ -367,6 +444,17 @@ func (p *policy) DeriveKey(context []byte, ver int) ([]byte, error) { } func (p *policy) Encrypt(context, nonce []byte, value string) (string, error) { + if !p.Type.EncryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} + } + + // Guard against a potentially invalid key type + switch p.Type { + case keyType_AES256_GCM96: + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + // Decode the plaintext value plaintext, err := base64.StdEncoding.DecodeString(value) if err != nil { @@ -379,11 +467,11 @@ func (p *policy) Encrypt(context, nonce []byte, value string) (string, error) { return "", err } - // Guard against a potentially invalid cipher-mode - switch p.CipherMode { - case "aes-gcm": + // Guard against a potentially invalid key type + switch p.Type { + case keyType_AES256_GCM96: default: - return "", errutil.InternalError{Err: "unsupported cipher mode"} + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } // Setup the cipher @@ -399,16 +487,16 @@ func (p *policy) Encrypt(context, nonce []byte, value string) (string, error) { } if p.ConvergentEncryption { - if p.ConvergentVersion == 1 { + switch p.ConvergentVersion { + case 1: if len(nonce) != gcm.NonceSize() { return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", gcm.NonceSize())} } - } else { - nonceKey, err := p.DeriveKey(append(key, plaintext...), p.LatestVersion) - if err != nil { - return "", err - } - nonce = nonceKey[:gcm.NonceSize()] + default: + nonceHmac := hmac.New(sha256.New, context) + nonceHmac.Write(plaintext) + nonceSum := nonceHmac.Sum(nil) + nonce = nonceSum[:gcm.NonceSize()] } } else { // Compute random nonce @@ -437,6 +525,10 @@ func (p *policy) Encrypt(context, nonce []byte, value string) (string, error) { } func (p *policy) Decrypt(context, nonce []byte, value string) (string, error) { + if !p.Type.DecryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)} + } + // Verify the prefix if !strings.HasPrefix(value, "vault:v") { return "", errutil.UserError{Err: "invalid ciphertext: no prefix"} @@ -476,11 +568,11 @@ func (p *policy) Decrypt(context, nonce []byte, value 
string) (string, error) { return "", err } - // Guard against a potentially invalid cipher-mode - switch p.CipherMode { - case "aes-gcm": + // Guard against a potentially invalid key type + switch p.Type { + case keyType_AES256_GCM96: default: - return "", errutil.InternalError{Err: "unsupported cipher mode"} + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } // Decode the base64 @@ -519,6 +611,124 @@ func (p *policy) Decrypt(context, nonce []byte, value string) (string, error) { return base64.StdEncoding.EncodeToString(plain), nil } +func (p *policy) HMACKey(version int) ([]byte, error) { + if version < p.MinDecryptionVersion { + return nil, fmt.Errorf("key version disallowed by policy (minimum is %d)", p.MinDecryptionVersion) + } + + if version > p.LatestVersion { + return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion) + } + + if p.Keys[version].HMACKey == nil { + return nil, fmt.Errorf("no HMAC key exists for that key version") + } + + return p.Keys[version].HMACKey, nil +} + +func (p *policy) Sign(hashedInput []byte) (string, error) { + if !p.Type.SigningSupported() { + return "", fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + var sig []byte + switch p.Type { + case keyType_ECDSA_P256: + keyParams := p.Keys[p.LatestVersion] + key := &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: keyParams.EC_X, + Y: keyParams.EC_Y, + }, + D: keyParams.EC_D, + } + r, s, err := ecdsa.Sign(rand.Reader, key, hashedInput) + if err != nil { + return "", err + } + marshaledSig, err := asn1.Marshal(ecdsaSignature{ + R: r, + S: s, + }) + if err != nil { + return "", err + } + sig = marshaledSig + + default: + return "", fmt.Errorf("unsupported key type %v", p.Type) + } + + // Convert to base64 + encoded := base64.StdEncoding.EncodeToString(sig) + + // Prepend some information + encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded + + return encoded, nil +} + +func (p *policy) VerifySignature(hashedInput []byte, sig string) (bool, error) { + if !p.Type.SigningSupported() { + return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)} + } + + // Verify the prefix + if !strings.HasPrefix(sig, "vault:v") { + return false, errutil.UserError{Err: "invalid signature: no prefix"} + } + + splitVerSig := strings.SplitN(strings.TrimPrefix(sig, "vault:v"), ":", 2) + if len(splitVerSig) != 2 { + return false, errutil.UserError{Err: "invalid signature: wrong number of fields"} + } + + ver, err := strconv.Atoi(splitVerSig[0]) + if err != nil { + return false, errutil.UserError{Err: "invalid signature: version number could not be decoded"} + } + + if ver > p.LatestVersion { + return false, errutil.UserError{Err: "invalid signature: version is too new"} + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + return false, errutil.UserError{Err: ErrTooOld} + } + + switch p.Type { + case keyType_ECDSA_P256: + asn1Sig, err := base64.StdEncoding.DecodeString(splitVerSig[1]) + if err != nil { + return false, errutil.UserError{Err: "invalid base64 signature value"} + } + + var ecdsaSig ecdsaSignature + rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig) + if err != nil { + return false, errutil.UserError{Err: "supplied signature is invalid"} + } + if rest != nil && len(rest) != 0 { + return false, errutil.UserError{Err: "supplied signature contains extra data"} + } + + keyParams := p.Keys[ver] + key := 
&ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: keyParams.EC_X, + Y: keyParams.EC_Y, + } + + return ecdsa.Verify(key, hashedInput, ecdsaSig.R, ecdsaSig.S), nil + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + + return false, errutil.InternalError{Err: "no valid key type found"} +} + func (p *policy) rotate(storage logical.Storage) error { if p.Keys == nil { // This is an initial key rotation when generating a new policy. We @@ -527,19 +737,51 @@ func (p *policy) rotate(storage logical.Storage) error { p.Keys = keyEntryMap{} } - // Generate a 256bit key - newKey, err := uuid.GenerateRandomBytes(32) + p.LatestVersion += 1 + entry := keyEntry{ + CreationTime: time.Now().Unix(), + } + + hmacKey, err := uuid.GenerateRandomBytes(32) if err != nil { return err } + entry.HMACKey = hmacKey - p.LatestVersion += 1 + switch p.Type { + case keyType_AES256_GCM96: + // Generate a 256bit key + newKey, err := uuid.GenerateRandomBytes(32) + if err != nil { + return err + } + entry.AESKey = newKey - p.Keys[p.LatestVersion] = keyEntry{ - Key: newKey, - CreationTime: time.Now().Unix(), + case keyType_ECDSA_P256: + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + entry.EC_D = privKey.D + entry.EC_X = privKey.X + entry.EC_Y = privKey.Y + derBytes, err := x509.MarshalPKIXPublicKey(privKey.Public()) + if err != nil { + return fmt.Errorf("error marshaling public key: %s", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + entry.FormattedPublicKey = string(pemBytes) } + p.Keys[p.LatestVersion] = entry + // This ensures that with new key creations min decryption version is set // to 1 rather than the int default of 0, since keys start at 1 (either // fresh or after migration to the key map) @@ -553,7 +795,7 @@ func (p *policy) rotate(storage logical.Storage) error { func (p *policy) migrateKeyToKeysMap() { p.Keys = keyEntryMap{ 1: keyEntry{ - Key: p.Key, + AESKey: p.Key, CreationTime: time.Now().Unix(), }, } diff --git a/builtin/logical/transit/policy_test.go b/builtin/logical/transit/policy_test.go index 729975cce..9c1a3869c 100644 --- a/builtin/logical/transit/policy_test.go +++ b/builtin/logical/transit/policy_test.go @@ -22,7 +22,11 @@ func Test_KeyUpgrade(t *testing.T) { func testKeyUpgradeCommon(t *testing.T, lm *lockManager) { storage := &logical.InmemStorage{} - p, lock, upserted, err := lm.GetPolicyUpsert(storage, "test", false, false) + p, lock, upserted, err := lm.GetPolicyUpsert(policyRequest{ + storage: storage, + keyType: keyType_AES256_GCM96, + name: "test", + }) if lock != nil { defer lock.RUnlock() } @@ -36,10 +40,10 @@ func testKeyUpgradeCommon(t *testing.T, lm *lockManager) { t.Fatal("expected an upsert") } - testBytes := make([]byte, len(p.Keys[1].Key)) - copy(testBytes, p.Keys[1].Key) + testBytes := make([]byte, len(p.Keys[1].AESKey)) + copy(testBytes, p.Keys[1].AESKey) - p.Key = p.Keys[1].Key + p.Key = p.Keys[1].AESKey p.Keys = nil p.migrateKeyToKeysMap() if p.Key != nil { @@ -48,7 +52,7 @@ func testKeyUpgradeCommon(t *testing.T, lm *lockManager) { if len(p.Keys) != 1 { t.Fatal("policy.Keys is the wrong size") } - if !reflect.DeepEqual(testBytes, p.Keys[1].Key) { + if !reflect.DeepEqual(testBytes, p.Keys[1].AESKey) { t.Fatal("key mismatch") } } @@ -67,8 +71,11 @@ func testArchivingUpgradeCommon(t *testing.T, lm 
*lockManager) { // zero and latest, respectively storage := &logical.InmemStorage{} - - p, lock, _, err := lm.GetPolicyUpsert(storage, "test", false, false) + p, lock, _, err := lm.GetPolicyUpsert(policyRequest{ + storage: storage, + keyType: keyType_AES256_GCM96, + name: "test", + }) if err != nil { t.Fatal(err) } @@ -191,14 +198,16 @@ func Test_Archiving(t *testing.T) { func testArchivingCommon(t *testing.T, lm *lockManager) { resetKeysArchive() - // First, we generate a policy and rotate it a number of times. Each time - // we'll ensure that we have the expected number of keys in the archive and + // First, we generate a policy and rotate it a number of times. Each time // we'll ensure that we have the expected number of keys in the archive and // the main keys object, which without changing the min version should be // zero and latest, respectively storage := &logical.InmemStorage{} - - p, lock, _, err := lm.GetPolicyUpsert(storage, "test", false, false) + p, lock, _, err := lm.GetPolicyUpsert(policyRequest{ + storage: storage, + keyType: keyType_AES256_GCM96, + name: "test", + }) if lock != nil { defer lock.RUnlock() } @@ -327,7 +336,7 @@ func checkKeys(t *testing.T, } for i := 1; i < len(archive.Keys); i++ { - if !reflect.DeepEqual(archive.Keys[i].Key, keysArchive[i].Key) { + if !reflect.DeepEqual(archive.Keys[i].AESKey, keysArchive[i].AESKey) { t.Fatalf("key %d not equivalent between policy archive and test keys archive", i) } } diff --git a/helper/salt/salt.go b/helper/salt/salt.go index dc6a6f777..3dba9eb27 100644 --- a/helper/salt/salt.go +++ b/helper/salt/salt.go @@ -26,7 +26,6 @@ type Salt struct { config *Config salt string generated bool - hmacType string } type HashFunc func([]byte) []byte @@ -62,6 +61,10 @@ func NewSalt(view logical.Storage, config *Config) (*Salt, error) { if config.HashFunc == nil { config.HashFunc = SHA256Hash } + if config.HMAC == nil { + config.HMAC = sha256.New + config.HMACType = "hmac-sha256" + } // Create the salt s := &Salt{ @@ -69,9 +72,13 @@ func NewSalt(view logical.Storage, config *Config) (*Salt, error) { } // Look for the salt - raw, err := view.Get(config.Location) - if err != nil { - return nil, fmt.Errorf("failed to read salt: %v", err) + var raw *logical.StorageEntry + var err error + if view != nil { + raw, err = view.Get(config.Location) + if err != nil { + return nil, fmt.Errorf("failed to read salt: %v", err) + } } // Restore the salt if it exists @@ -101,7 +108,6 @@ func NewSalt(view logical.Storage, config *Config) (*Salt, error) { if len(config.HMACType) == 0 { return nil, fmt.Errorf("HMACType must be defined") } - s.hmacType = config.HMACType } return s, nil @@ -124,7 +130,7 @@ func (s *Salt) GetHMAC(data string) string { // GetIdentifiedHMAC is used to apply a salt and hash function to data to make // sure it is not reversible, with an additional HMAC, and ID prepended func (s *Salt) GetIdentifiedHMAC(data string) string { - return s.hmacType + ":" + s.GetHMAC(data) + return s.config.HMACType + ":" + s.GetHMAC(data) } // DidGenerate returns if the underlying salt value was generated diff --git a/vendor/github.com/jefferai/jsonx/LICENSE b/vendor/github.com/jefferai/jsonx/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. 
"Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/jefferai/jsonx/README.md b/vendor/github.com/jefferai/jsonx/README.md new file mode 100644 index 000000000..a7bb5bac9 --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/README.md @@ -0,0 +1,12 @@ +JSONx +======== + +[![GoDoc](https://godoc.org/github.com/jefferai/jsonx?status.svg)](https://godoc.org/github.com/jefferai/jsonx) + +A Go (Golang) library to transform an object or existing JSON bytes into +[JSONx](https://www.ibm.com/support/knowledgecenter/SS9H2Y_7.5.0/com.ibm.dp.doc/json_jsonxconversionrules.html). +Because sometimes your luck runs out. + +This follows the "standard" except for the handling of special and escaped +characters. Names and values are properly XML-escaped but there is no special +handling of values already escaped in JSON if they are valid in XML. diff --git a/vendor/github.com/jefferai/jsonx/jsonx.go b/vendor/github.com/jefferai/jsonx/jsonx.go new file mode 100644 index 000000000..93d24a9b0 --- /dev/null +++ b/vendor/github.com/jefferai/jsonx/jsonx.go @@ -0,0 +1,132 @@ +package jsonx + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "sort" + + "github.com/Jeffail/gabs" +) + +const ( + XMLHeader = `` + Header = `` + Footer = `` +) + +// namedContainer wraps a gabs.Container to carry name information with it +type namedContainer struct { + name string + *gabs.Container +} + +// Marshal marshals the input data into JSONx. +func Marshal(input interface{}) (string, error) { + jsonBytes, err := json.Marshal(input) + if err != nil { + return "", err + } + xmlBytes, err := EncodeJSONBytes(jsonBytes) + if err != nil { + return "", err + } + return fmt.Sprintf("%s%s%s%s", XMLHeader, Header, string(xmlBytes), Footer), nil +} + +// EncodeJSONBytes encodes JSON-formatted bytes into JSONx. It is designed to +// be used for multiple entries so does not prepend the JSONx header tag or +// append the JSONx footer tag. You can use jsonx.Header and jsonx.Footer to +// easily add these when necessary. 
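+// A rough usage sketch (illustrative only; error handling is abbreviated and
+// the exact XML produced depends on the element encoding below):
+//
+//	xmlBytes, err := EncodeJSONBytes([]byte(`{"hello":"world"}`))
+//	if err != nil {
+//		return err
+//	}
+//	doc := XMLHeader + Header + string(xmlBytes) + Footer
+//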
+func EncodeJSONBytes(input []byte) ([]byte, error) { + o := bytes.NewBuffer(nil) + reader := bytes.NewReader(input) + dec := json.NewDecoder(reader) + dec.UseNumber() + + cont, err := gabs.ParseJSONDecoder(dec) + if err != nil { + return nil, err + } + + if err := sortAndTransformObject(o, &namedContainer{Container: cont}); err != nil { + return nil, err + } + + return o.Bytes(), nil +} + +func transformContainer(o *bytes.Buffer, cont *namedContainer) error { + var printName string + + if cont.name != "" { + escapedNameBuf := bytes.NewBuffer(nil) + err := xml.EscapeText(escapedNameBuf, []byte(cont.name)) + if err != nil { + return err + } + printName = fmt.Sprintf(" name=\"%s\"", escapedNameBuf.String()) + } + + data := cont.Data() + switch data.(type) { + case nil: + o.WriteString(fmt.Sprintf("", printName)) + + case bool: + o.WriteString(fmt.Sprintf("%t", printName, data)) + + case json.Number: + o.WriteString(fmt.Sprintf("%v", printName, data)) + + case string: + o.WriteString(fmt.Sprintf("%v", printName, data)) + + case []interface{}: + o.WriteString(fmt.Sprintf("", printName)) + arrayChildren, err := cont.Children() + if err != nil { + return err + } + for _, child := range arrayChildren { + if err := transformContainer(o, &namedContainer{Container: child}); err != nil { + return err + } + } + o.WriteString("") + + case map[string]interface{}: + o.WriteString(fmt.Sprintf("", printName)) + + if err := sortAndTransformObject(o, cont); err != nil { + return err + } + + o.WriteString("") + } + + return nil +} + +// sortAndTransformObject sorts object keys to make the output predictable so +// the package can be tested; logic is here to prevent code duplication +func sortAndTransformObject(o *bytes.Buffer, cont *namedContainer) error { + objectChildren, err := cont.ChildrenMap() + if err != nil { + return err + } + + sortedNames := make([]string, 0, len(objectChildren)) + for name, _ := range objectChildren { + sortedNames = append(sortedNames, name) + } + sort.Strings(sortedNames) + for _, name := range sortedNames { + if err := transformContainer(o, &namedContainer{name: name, Container: objectChildren[name]}); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 3baba3130..81bb9ac78 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -606,6 +606,12 @@ "revision": "d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd", "revisionTime": "2016-07-20T23:31:40Z" }, + { + "checksumSHA1": "cIinEjB62s8j5cpY1u7sxtg4akg=", + "path": "github.com/jefferai/jsonx", + "revision": "9cc31c3135eef39b8e72585f37efa92b6ca314d0", + "revisionTime": "2016-07-21T23:51:17Z" + }, { "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=", "path": "github.com/jmespath/go-jmespath", diff --git a/website/source/docs/audit/file.html.md b/website/source/docs/audit/file.html.md index 1b66c899c..1b09abf35 100644 --- a/website/source/docs/audit/file.html.md +++ b/website/source/docs/audit/file.html.md @@ -57,8 +57,14 @@ Following are the configuration options available for the backend.
  • hmac_accessor optional - A boolean, if set, enables the hashing of token accessor. Defaults to `true`. This option - is useful only when `log_raw` is `false`. + A boolean, if set, enables the hashing of token accessor. Defaults + to `true`. This option is useful only when `log_raw` is `false`. +
  • +
  • + format + optional + Allows selecting the output format. Valid values are `json` (the + default) and `jsonx`, which formats the normal log entries as XML.
  • diff --git a/website/source/docs/audit/syslog.html.md b/website/source/docs/audit/syslog.html.md index b52d37e7b..f7cb4ac5c 100644 --- a/website/source/docs/audit/syslog.html.md +++ b/website/source/docs/audit/syslog.html.md @@ -62,8 +62,14 @@ Following are the configuration options available for the backend.
  • hmac_accessor optional - A boolean, if set, enables the hashing of token accessor. Defaults to `true`. This option - is useful only when `log_raw` is `false`. + A boolean, if set, enables the hashing of token accessor. Defaults + to `true`. This option is useful only when `log_raw` is `false`. +
  • +
  • + format + optional + Allows selecting the output format. Valid values are `json` (the + default) and `jsonx`, which formats the normal log entries as XML.
  • diff --git a/website/source/docs/secrets/transit/index.html.md b/website/source/docs/secrets/transit/index.html.md index da0b69b6f..baf2acb3e 100644 --- a/website/source/docs/secrets/transit/index.html.md +++ b/website/source/docs/secrets/transit/index.html.md @@ -10,19 +10,20 @@ description: |- Name: `transit` -The transit secret backend is used to encrypt/decrypt data in-transit. Vault -doesn't store the data sent to the backend. It can also be viewed as "encryption -as a service." +The transit secret backend handles cryptographic functions on data in-transit. +Vault doesn't store the data sent to the backend. It can also be viewed as +"cryptography as a service." -The primary use case for the transit backend is to encrypt data from -applications while still storing that encrypted data in some primary data -store. This relieves the burden of proper encryption/decryption from -application developers and pushes the burden onto the operators of Vault. -Operators of Vault generally include the security team at an organization, -which means they can ensure that data is encrypted/decrypted properly. +The primary use case for `transit` is to encrypt data from applications while +still storing that encrypted data in some primary data store. This relieves the +burden of proper encryption/decryption from application developers and pushes +the burden onto the operators of Vault. Operators of Vault generally include +the security team at an organization, which means they can ensure that data is +encrypted/decrypted properly. Additionally, since encrypt/decrypt operations +must enter the audit log, any decryption event is recorded. -Additionally, since encrypt/decrypt operations must enter the audit log, -any decryption event is recorded. +`transit` can also sign and verify data; generate hashes and HMACs of data; and +act as a source of random bytes. Due to Vault's flexible ACLs, other interesting use-cases are possible. For instance, one set of Internet-facing servers can be given permission to encrypt @@ -85,7 +86,7 @@ the settings of the "foo" key by reading it: ``` $ vault read transit/keys/foo Key Value -cipher_mode aes-gcm +type aes256-gcm96 deletion_allowed false derived false keys map[1:1.459861712e+09] @@ -131,8 +132,8 @@ only encrypt or decrypt using the named keys they need access to.
    Description
    - Creates a new named encryption key. The values set here cannot be changed - after key creation. + Creates a new named encryption key of the specified type. The values set + here cannot be changed after key creation.
    Method
    @@ -144,6 +145,16 @@ only encrypt or decrypt using the named keys they need access to.
    Parameters
      +
    • + type + required + The type of key to create. The currently-supported types are: +
        +
      • `aes256-gcm96`: AES-256 wrapped with GCM using a 12-byte nonce size (symmetric)
      • +
      • `ecdsa-p256`: ECDSA using the P-256 elliptic curve (asymmetric)
      • +
+ Defaults to `aes256-gcm96`. +
    • derived optional @@ -180,7 +191,9 @@ only encrypt or decrypt using the named keys they need access to.
      Returns information about a named encryption key. The `keys` object shows the creation time of each key version; the values are not the keys - themselves. + themselves. Depending on the type of key, different information may be + returned, e.g. an asymmetric key will return its public key in a standard + format for the type.
      Method
      @@ -200,7 +213,7 @@ only encrypt or decrypt using the named keys they need access to. ```javascript { "data": { - "cipher_mode": "aes-gcm", + "type": "aes256-gcm96", "deletion_allowed": false, "derived": false, "keys": { @@ -268,10 +281,13 @@ only encrypt or decrypt using the named keys they need access to. The minimum version of ciphertext allowed to be decrypted. Adjusting this as part of a key rotation policy can prevent old copies of ciphertext from being decrypted, should they fall into the wrong hands. + For signatures, this value controls the minimum version of signature + that can be verified against. For HMACs, this controls the minimum + version of a key allowed to be used as the key for the HMAC function. Defaults to 0.
    • - deletion_allowed + allow_deletion optional When set, the key is allowed to be deleted. Defaults to false.
    • @@ -293,7 +309,8 @@ only encrypt or decrypt using the named keys they need access to. Rotates the version of the named key. After rotation, new plaintext requests will be encrypted with the new version of the key. To upgrade ciphertext to be encrypted with the latest version of the key, use the - `rewrap` endpoint. + `rewrap` endpoint. This is only supported with keys that support encryption + and decryption operations.
    Method
    @@ -319,13 +336,13 @@ only encrypt or decrypt using the named keys they need access to.
    Description
    - Encrypts the provided plaintext using the named key. This path supports the - `create` and `update` policy capabilities as follows: if the user has the - `create` capability for this endpoint in their policies, and the key does - not exist, it will be upserted with default values (whether the key - requires derivation depends on whether the context parameter is empty or - not). If the user only has `update` capability and the key does not exist, - an error will be returned. + Encrypts the provided plaintext using the named key. Currently, this only + supports symmetric keys. This path supports the `create` and `update` + policy capabilities as follows: if the user has the `create` capability for + this endpoint in their policies, and the key does not exist, it will be + upserted with default values (whether the key requires derivation depends + on whether the context parameter is empty or not). If the user only has + `update` capability and the key does not exist, an error will be returned.
    Method
    @@ -340,12 +357,12 @@ only encrypt or decrypt using the named keys they need access to.
  • plaintext required - The plaintext to encrypt, provided as base64 encoded. + The plaintext to encrypt, provided as a base64-encoded string.
  • context optional - The key derivation context, provided as base64 encoded. + The key derivation context, provided as a base64-encoded string. Must be provided if derivation is enabled.
  • @@ -381,7 +398,8 @@ only encrypt or decrypt using the named keys they need access to.
    Description
    - Decrypts the provided ciphertext using the named key. + Decrypts the provided ciphertext using the named key. Currently, this only + supports symmetric keys.
    Method
    @@ -401,7 +419,7 @@ only encrypt or decrypt using the named keys they need access to.
  • context optional - The key derivation context, provided as base64 encoded. + The key derivation context, provided as a base64-encoded string. Must be provided if derivation is enabled.
  • @@ -457,7 +475,7 @@ only encrypt or decrypt using the named keys they need access to.
• context optional - The key derivation context, provided as base64 encoded. + The key derivation context, provided as a base64-encoded string. Must be provided if derivation is enabled.
  • @@ -517,7 +535,7 @@ only encrypt or decrypt using the named keys they need access to.
  • context optional - The key derivation context, provided as base64 encoded. + The key derivation context, provided as a base64-encoded string. Must be provided if derivation is enabled.
  • @@ -553,3 +571,288 @@ only encrypt or decrypt using the named keys they need access to.
  • + +### /transit/random +#### POST + +
    +
    Description
    +
+ Returns high-quality random bytes of the specified length. +
    + +
    Method
    +
    POST
    + +
    URL
    +
`/transit/random(/<bytes>)`
    + +
    Parameters
    +
    +
      +
    • + bytes + optional + The number of bytes to return. Defaults to 32 (256 bits). This value + can be specified either in the request body, or as a part of the URL + with a format like `/transit/random/48`. +
    • +
    • + format + optional + The output encoding; can be either `hex` or `base64`. Defaults to + `base64`. +
    • +
    +
    + +
    Returns
    +
    + + ```javascript + { + "data": { + "random_bytes": "dGhlIHF1aWNrIGJyb3duIGZveAo=" + } + } + ``` + +
    +
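As a rough sketch of driving the endpoint above from Go (assuming the `github.com/hashicorp/vault/api` client, a Vault server reachable via the standard `VAULT_ADDR` environment variable, and a placeholder token; the byte count and output format are just example values):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // honors VAULT_ADDR
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root") // placeholder token

	// Ask transit for 48 random bytes, hex-encoded. The byte count could
	// equally be given in the URL, e.g. "transit/random/48".
	secret, err := client.Logical().Write("transit/random", map[string]interface{}{
		"bytes":  48,
		"format": "hex",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["random_bytes"])
}
```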
    + +### /transit/hash +#### POST + +
    +
    Description
    +
    + Returns the hash of given data using the specified algorithm. The algorithm + can be specified as part of the URL or given via a parameter; the URL value + takes precedence if both are set. +
    + +
    Method
    +
    POST
    + +
    URL
    +
`/transit/hash(/<algorithm>)`
    + +
    Parameters
    +
    +
      +
    • + input + required + The base64-encoded input data. +
    • +
    • + algorithm + optional + The hash algorithm to use. This can also be specified in the URL. + Currently-supported algorithms are: +
        +
      • `sha2-224`
      • +
      • `sha2-256`
      • +
      • `sha2-384`
      • +
      • `sha2-512`
      • +
      + Defaults to `sha2-256`. +
    • +
    • + format + optional + The output encoding; can be either `hex` or `base64`. Defaults to + `hex`. +
    • +
    +
    + +
    Returns
    +
    + + ```javascript + { + "data": { + "sum": "dGhlIHF1aWNrIGJyb3duIGZveAo=" + } + } + ``` + +
    +
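A similar sketch for the hash endpoint above, under the same assumptions (the `github.com/hashicorp/vault/api` client and a placeholder token); note that the caller is responsible for base64-encoding the input:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root") // placeholder token

	input := base64.StdEncoding.EncodeToString([]byte("the quick brown fox"))

	// Equivalent to POSTing to /transit/hash/sha2-512 with {"input": ..., "format": "hex"}.
	secret, err := client.Logical().Write("transit/hash/sha2-512", map[string]interface{}{
		"input":  input,
		"format": "hex",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["sum"])
}
```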
+ +### /transit/hmac/<name> +#### POST + +
    +
    Description
    +
    + Returns the digest of given data using the specified hash algorithm and the + named key. The key can be of any type supported by `transit`; the raw key + will be marshalled into bytes to be used for the HMAC function. If the key + is of a type that supports rotation, the latest (current) version will be + used. +
    + +
    Method
    +
    POST
    + +
    URL
    +
`/transit/hmac/<name>(/<algorithm>)`
    + +
    Parameters
    +
    +
      +
    • + input + required + The base64-encoded input data. +
    • +
    • + algorithm + optional + The hash algorithm to use. This can also be specified in the URL. + Currently-supported algorithms are: +
        +
      • `sha2-224`
      • +
      • `sha2-256`
      • +
      • `sha2-384`
      • +
      • `sha2-512`
      • +
      + Defaults to `sha2-256`. +
    • +
    • + format + optional + The output encoding; can be either `hex` or `base64`. Defaults to + `hex`. +
    • +
    +
    + +
    Returns
    +
    + + ```javascript + { + "data": { + "hmac": "dGhlIHF1aWNrIGJyb3duIGZveAo=" + } + } + ``` + +
    +
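A minimal Go sketch of the HMAC endpoint above, under the same assumptions as the previous examples; the key name `my-key` is a placeholder and is assumed to already exist under `transit/keys/my-key`:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root") // placeholder token

	// Assumes a transit key named "my-key" already exists; the latest key
	// version is used for the HMAC.
	input := base64.StdEncoding.EncodeToString([]byte("message to authenticate"))
	secret, err := client.Logical().Write("transit/hmac/my-key/sha2-256", map[string]interface{}{
		"input": input,
	})
	if err != nil {
		log.Fatal(err)
	}
	// The digest is returned under "hmac" (hex-encoded by default).
	fmt.Println(secret.Data["hmac"])
}
```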
+ +### /transit/sign/<name> +#### POST + +
    +
    Description
    +
    + Returns the cryptographic signature of the given data using the named key + and the specified hash algorithm. The key must be of a type that supports + signing. +
    + +
    Method
    +
    POST
    + +
    URL
    +
`/transit/sign/<name>(/<algorithm>)`
    + +
    Parameters
    +
    +
      +
    • + input + required + The base64-encoded input data. +
    • +
    • + algorithm + optional + The hash algorithm to use. This can also be specified in the URL. + Currently-supported algorithms are: +
        +
      • `sha2-224`
      • +
      • `sha2-256`
      • +
      • `sha2-384`
      • +
      • `sha2-512`
      • +
      + Defaults to `sha2-256`. +
    • +
    +
    + +
    Returns
    +
    + + ```javascript + { + "data": { + "signature": "vault:v1:MEUCIQCyb869d7KWuA0hBM9b5NJrmWzMW3/pT+0XYCM9VmGR+QIgWWF6ufi4OS2xo1eS2V5IeJQfsi59qeMWtgX0LipxEHI=" + } + } + ``` + +
    +
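A rough Go sketch of the signing endpoint above, under the same client assumptions; it first creates a placeholder `ecdsa-p256` key named `signer`, since only signing-capable key types can be used here:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root") // placeholder token

	// Create a signing-capable key; "signer" is a placeholder name.
	if _, err := client.Logical().Write("transit/keys/signer", map[string]interface{}{
		"type": "ecdsa-p256",
	}); err != nil {
		log.Fatal(err)
	}

	input := base64.StdEncoding.EncodeToString([]byte("important document"))
	secret, err := client.Logical().Write("transit/sign/signer/sha2-256", map[string]interface{}{
		"input": input,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Signatures come back in the "vault:v1:..." form shown in the sample
	// response above.
	fmt.Println(secret.Data["signature"])
}
```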
+ +### /transit/verify/<name> +#### POST + +
    +
    Description
    +
    + Returns whether the provided signature is valid for the given data. +
    + +
    Method
    +
    POST
    + +
    URL
    +
`/transit/verify/<name>(/<algorithm>)`
    + +
    Parameters
    +
    +
      +
    • + input + required + The base64-encoded input data. +
    • +
    • + signature + required + The signature output from the `/transit/sign` function. +
    • +
    • + algorithm + optional + The hash algorithm to use. This can also be specified in the URL. + Currently-supported algorithms are: +
        +
      • `sha2-224`
      • +
      • `sha2-256`
      • +
      • `sha2-384`
      • +
      • `sha2-512`
      • +
      + Defaults to `sha2-256`. +
    • +
    +
    + +
    Returns
    +
    + + ```javascript + { + "data": { + "valid": true + } + } + ``` + +
    +
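Finally, a rough Go sketch of the verification endpoint above, under the same client assumptions; it reuses the placeholder `signer` key from the previous example, and the signature literal is truncated and must be replaced with real output from `/transit/sign`:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("root") // placeholder token

	input := base64.StdEncoding.EncodeToString([]byte("important document"))
	signature := "vault:v1:MEUCIQ..." // placeholder; use the output of /transit/sign

	secret, err := client.Logical().Write("transit/verify/signer/sha2-256", map[string]interface{}{
		"input":     input,
		"signature": signature,
	})
	if err != nil {
		log.Fatal(err)
	}
	// "valid" is true only if the signature matches the input under the named key.
	fmt.Printf("valid: %v\n", secret.Data["valid"])
}
```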