diff --git a/audit/hashstructure.go b/audit/hashstructure.go index 8d0fd7c6c..ea0899ee9 100644 --- a/audit/hashstructure.go +++ b/audit/hashstructure.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/mitchellh/copystructure" "github.com/mitchellh/reflectwalk" @@ -84,7 +85,7 @@ func Hash(salter *salt.Salt, raw interface{}) error { s.Data = data.(map[string]interface{}) - case *logical.ResponseWrapInfo: + case *wrapping.ResponseWrapInfo: if s == nil { return nil } diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go index 5fefa0fa9..6916d0d3a 100644 --- a/audit/hashstructure_test.go +++ b/audit/hashstructure_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/mitchellh/copystructure" ) @@ -69,7 +70,7 @@ func TestCopy_response(t *testing.T) { Data: map[string]interface{}{ "foo": "bar", }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "foo", CreationTime: time.Now(), @@ -140,7 +141,7 @@ func TestHash(t *testing.T) { Data: map[string]interface{}{ "foo": "bar", }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "bar", CreationTime: now, @@ -151,7 +152,7 @@ func TestHash(t *testing.T) { Data: map[string]interface{}{ "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", CreationTime: now, diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go new file mode 100644 index 000000000..91b92e438 --- /dev/null +++ b/builtin/logical/database/backend.go @@ -0,0 +1,177 @@ +package database + +import ( + "fmt" + "net/rpc" + "strings" + "sync" + + log "github.com/mgutz/logxi/v1" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +const databaseConfigPath = "database/config/" + +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + return Backend(conf).Setup(conf) +} + +func Backend(conf *logical.BackendConfig) *databaseBackend { + var b databaseBackend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + Paths: []*framework.Path{ + pathConfigurePluginConnection(&b), + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + pathResetConnection(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Clean: b.closeAllDBs, + + Invalidate: b.invalidate, + } + + b.logger = conf.Logger + b.connections = make(map[string]dbplugin.Database) + return &b +} + +type databaseBackend struct { + connections map[string]dbplugin.Database + logger log.Logger + + *framework.Backend + sync.RWMutex +} + +// closeAllDBs closes all connections from all database types +func (b *databaseBackend) closeAllDBs() { + b.Lock() + defer b.Unlock() + + for _, db := range b.connections { + db.Close() + } + + b.connections = make(map[string]dbplugin.Database) +} + +// This function is used to retrieve a database object either from the cached +// connection map. The caller of this function needs to hold the backend's read +// lock. 
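// A minimal sketch of the intended call pattern, as used by pathCredsCreateRead
// later in this diff (illustrative only; error handling elided):
//
//	b.RLock()
//	db, ok := b.getDBObj(name)
//	if !ok {
//		b.RUnlock()
//		b.Lock()
//		db, err = b.createDBObj(req.Storage, name)
//		b.Unlock()
//	}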
+func (b *databaseBackend) getDBObj(name string) (dbplugin.Database, bool) { + db, ok := b.connections[name] + return db, ok +} + +// This function creates a new db object from the stored configuration and +// caches it in the connections map. The caller of this function needs to hold +// the backend's write lock +func (b *databaseBackend) createDBObj(s logical.Storage, name string) (dbplugin.Database, error) { + db, ok := b.connections[name] + if ok { + return db, nil + } + + config, err := b.DatabaseConfig(s, name) + if err != nil { + return nil, err + } + + db, err = dbplugin.PluginFactory(config.PluginName, b.System(), b.logger) + if err != nil { + return nil, err + } + + err = db.Initialize(config.ConnectionDetails, true) + if err != nil { + return nil, err + } + + b.connections[name] = db + + return db, nil +} + +func (b *databaseBackend) DatabaseConfig(s logical.Storage, name string) (*DatabaseConfig, error) { + entry, err := s.Get(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %s", err) + } + if entry == nil { + return nil, fmt.Errorf("failed to find entry for connection with name: %s", name) + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &config, nil +} + +func (b *databaseBackend) Role(s logical.Storage, roleName string) (*roleEntry, error) { + entry, err := s.Get("role/" + roleName) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *databaseBackend) invalidate(key string) { + b.Lock() + defer b.Unlock() + + switch { + case strings.HasPrefix(key, databaseConfigPath): + name := strings.TrimPrefix(key, databaseConfigPath) + b.clearConnection(name) + } +} + +// clearConnection closes the database connection and +// removes it from the b.connections map. +func (b *databaseBackend) clearConnection(name string) { + db, ok := b.connections[name] + if ok { + db.Close() + delete(b.connections, name) + } +} + +func (b *databaseBackend) closeIfShutdown(name string, err error) { + // Plugin has shutdown, close it so next call can reconnect. + if err == rpc.ErrShutdown { + b.Lock() + b.clearConnection(name) + b.Unlock() + } +} + +const backendHelp = ` +The database backend supports using many different databases +as secret backends, including but not limited to: +cassandra, mssql, mysql, postgres + +After mounting this backend, configure it using the endpoints within +the "database/config/" path. 
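As an illustrative example (the connection name and URL here are placeholders),
a PostgreSQL connection might be configured like:

    vault mount database
    vault write database/config/postgresql \
        plugin_name=postgresql-database-plugin \
        connection_url=postgres://user:pass@localhost:5432/dbname \
        allowed_roles=readonly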
+` diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go new file mode 100644 index 000000000..27c20d332 --- /dev/null +++ b/builtin/logical/database/backend_test.go @@ -0,0 +1,766 @@ +package database + +import ( + "database/sql" + "fmt" + "io/ioutil" + "log" + stdhttp "net/http" + "os" + "reflect" + "sync" + "testing" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/plugins/database/postgresql" + "github.com/hashicorp/vault/vault" + "github.com/lib/pq" + "github.com/mitchellh/mapstructure" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +var ( + testImagePull sync.Once +) + +func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cleanup func(), retURL string) { + if os.Getenv("PG_URL") != "" { + return func() {}, os.Getenv("PG_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"}) + if err != nil { + t.Fatalf("Could not start local PostgreSQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + // This will cause a validation to run + resp, err := b.HandleRequest(&logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: "config/postgresql", + Data: map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_url": retURL, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + // It's likely not up and running yet, so return error and try again + return fmt.Errorf("err:%s resp:%#v\n", err, resp) + } + if resp == nil { + t.Fatal("expected warning") + } + + return nil + }); err != nil { + t.Fatalf("Could not connect to PostgreSQL docker container: %s", err) + } + + return +} + +func getCore(t *testing.T) ([]*vault.TestClusterCore, logical.SystemView) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "database": Factory, + }, + } + + handler1 := stdhttp.NewServeMux() + handler2 := stdhttp.NewServeMux() + handler3 := stdhttp.NewServeMux() + + // Chicken-and-egg: Handler needs a core. So we create handlers first, then + // add routes chained to a Handler-created handler. 
+ cores := vault.TestCluster(t, []stdhttp.Handler{handler1, handler2, handler3}, coreConfig, false) + handler1.Handle("/", http.Handler(cores[0].Core)) + handler2.Handle("/", http.Handler(cores[1].Core)) + handler3.Handle("/", http.Handler(cores[2].Core)) + + core := cores[0] + + sys := vault.TestDynamicSystemView(core.Core) + vault.TestAddTestPlugin(t, core.Core, "postgresql-database-plugin", "TestBackend_PluginMain") + + return cores, sys +} + +func TestBackend_PluginMain(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + content := []byte(vault.TestClusterCACert) + tmpfile, err := ioutil.TempFile("", "example") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(tmpfile.Name()) // clean up + + if _, err := tmpfile.Write(content); err != nil { + t.Fatal(err) + } + if err := tmpfile.Close(); err != nil { + t.Fatal(err) + } + + args := []string{"--ca-cert=" + tmpfile.Name()} + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + postgresql.Run(apiClientMeta.GetTLSConfig()) +} + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + configData := map[string]interface{}{ + "connection_url": "sample_connection_url", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": "sample_connection_url", + }, + "allowed_roles": []string{"*"}, + } + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } +} + +func TestBackend_basic(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": 
"plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Revoke creds + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.RevokeOperation, + Storage: config.StorageView, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": credsResp.Data["username"], + "role": "plugin-role-test", + }, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should not exist") + } + +} + +func TestBackend_connectionCrud(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": "test", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Update the connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": connURL, + }, + "allowed_roles": []string{"plugin-role-test"}, + } + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + 
t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + + // Reset Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "reset/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Delete Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} + +func TestBackend_roleCrud(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := dbplugin.Statements{ + CreationStatements: testRole, + RevocationStatements: defaultRevocationSQL, + } + + var actual dbplugin.Statements + if err := mapstructure.Decode(resp.Data, &actual); err != nil { + 
t.Fatal(err) + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("Statements did not match, exepected %#v, got %#v", expected, actual) + } + + // Delete the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} +func TestBackend_allowedRoles(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a denied and an allowed role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/denied", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/allowed", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != logical.ErrPermissionDenied { + t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err) + } + + // update connection with * allowed roles connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "*", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds, should work. 
+ data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // update connection with allowed roles + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "allow, allowed", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != logical.ErrPermissionDenied { + t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err) + } + + // Get creds from allowed role, should work. + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } +} + +func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + log.Printf("[TRACE] Generated credentials: %v", d) + conn, err := pq.ParseURL(connURL) + + if err != nil { + t.Fatal(err) + } + + conn += " timezone=utc" + + db, err := sql.Open("postgres", conn) + if err != nil { + t.Fatal(err) + } + + returnedRows := func() int { + stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');") + if err != nil { + return -1 + } + defer stmt.Close() + + rows, err := stmt.Query(d.Username) + if err != nil { + return -1 + } + defer rows.Close() + + i := 0 + for rows.Next() { + i++ + } + return i + } + + return returnedRows() == 2 +} + +const testRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const defaultRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; +REVOKE USAGE ON SCHEMA public FROM {{name}}; + +DROP ROLE IF EXISTS {{name}}; +` diff --git a/builtin/logical/database/dbplugin/client.go b/builtin/logical/database/dbplugin/client.go new file mode 100644 index 000000000..0c095f891 --- /dev/null +++ b/builtin/logical/database/dbplugin/client.go @@ -0,0 +1,132 @@ +package dbplugin + +import ( + "fmt" + "net/rpc" + "sync" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" +) + +// DatabasePluginClient embeds a 
databasePluginRPCClient and wraps its Close method to also call Kill() on the plugin.Client.
+type DatabasePluginClient struct {
+	client *plugin.Client
+	sync.Mutex
+
+	*databasePluginRPCClient
+}
+
+func (dc *DatabasePluginClient) Close() error {
+	err := dc.databasePluginRPCClient.Close()
+	dc.client.Kill()
+
+	return err
+}
+
+// newPluginClient returns a databasePluginRPCClient with a connection to a
+// running plugin. The client is wrapped in a DatabasePluginClient object to
+// ensure the plugin is killed on call of Close().
+func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner) (Database, error) {
+	// pluginMap is the map of plugins we can dispense.
+	var pluginMap = map[string]plugin.Plugin{
+		"database": new(DatabasePlugin),
+	}
+
+	client, err := pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{})
+	if err != nil {
+		return nil, err
+	}
+
+	// Connect via RPC
+	rpcClient, err := client.Client()
+	if err != nil {
+		return nil, err
+	}
+
+	// Request the plugin
+	raw, err := rpcClient.Dispense("database")
+	if err != nil {
+		return nil, err
+	}
+
+	// We should have a database type now. This feels like a normal interface
+	// implementation but is in fact over an RPC connection.
+	databaseRPC := raw.(*databasePluginRPCClient)
+
+	// Wrap the RPC implementation in a DatabasePluginClient
+	return &DatabasePluginClient{
+		client:                  client,
+		databasePluginRPCClient: databaseRPC,
+	}, nil
+}
+
+// ---- RPC client domain ----
+
+// databasePluginRPCClient implements Database and is used on the client to
+// make RPC calls to a plugin.
+type databasePluginRPCClient struct {
+	client *rpc.Client
+}
+
+func (dr *databasePluginRPCClient) Type() (string, error) {
+	var dbType string
+	err := dr.client.Call("Plugin.Type", struct{}{}, &dbType)
+
+	return fmt.Sprintf("plugin-%s", dbType), err
+}
+
+func (dr *databasePluginRPCClient) CreateUser(statements Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) {
+	req := CreateUserRequest{
+		Statements:     statements,
+		UsernamePrefix: usernamePrefix,
+		Expiration:     expiration,
+	}
+
+	var resp CreateUserResponse
+	err = dr.client.Call("Plugin.CreateUser", req, &resp)
+
+	return resp.Username, resp.Password, err
+}
+
+func (dr *databasePluginRPCClient) RenewUser(statements Statements, username string, expiration time.Time) error {
+	req := RenewUserRequest{
+		Statements: statements,
+		Username:   username,
+		Expiration: expiration,
+	}
+
+	err := dr.client.Call("Plugin.RenewUser", req, &struct{}{})
+
+	return err
+}
+
+func (dr *databasePluginRPCClient) RevokeUser(statements Statements, username string) error {
+	req := RevokeUserRequest{
+		Statements: statements,
+		Username:   username,
+	}
+
+	err := dr.client.Call("Plugin.RevokeUser", req, &struct{}{})
+
+	return err
+}
+
+func (dr *databasePluginRPCClient) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+	req := InitializeRequest{
+		Config:           conf,
+		VerifyConnection: verifyConnection,
+	}
+
+	err := dr.client.Call("Plugin.Initialize", req, &struct{}{})
+
+	return err
+}
+
+func (dr *databasePluginRPCClient) Close() error {
+	err := dr.client.Call("Plugin.Close", struct{}{}, &struct{}{})
+
+	return err
+}
diff --git a/builtin/logical/database/dbplugin/databasemiddleware.go b/builtin/logical/database/dbplugin/databasemiddleware.go
new file mode 100644
index 000000000..83f57ef87
--- /dev/null
+++ b/builtin/logical/database/dbplugin/databasemiddleware.go
@@ -0,0 +1,162 @@
+package dbplugin
+
+import (
( + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/mgutz/logxi/v1" +) + +// ---- Tracing Middleware Domain ---- + +// databaseTracingMiddleware wraps a implementation of Database and executes +// trace logging on function call. +type databaseTracingMiddleware struct { + next Database + logger log.Logger + + typeStr string +} + +func (mw *databaseTracingMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseTracingMiddleware) CreateUser(statements Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "CreateUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "CreateUser", "status", "started", "type", mw.typeStr) + return mw.next.CreateUser(statements, usernamePrefix, expiration) +} + +func (mw *databaseTracingMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "RenewUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "RenewUser", "status", "started", mw.typeStr) + return mw.next.RenewUser(statements, username, expiration) +} + +func (mw *databaseTracingMiddleware) RevokeUser(statements Statements, username string) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "RevokeUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "RevokeUser", "status", "started", "type", mw.typeStr) + return mw.next.RevokeUser(statements, username) +} + +func (mw *databaseTracingMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "Initialize", "status", "finished", "type", mw.typeStr, "verify", verifyConnection, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "Initialize", "status", "started", "type", mw.typeStr) + return mw.next.Initialize(conf, verifyConnection) +} + +func (mw *databaseTracingMiddleware) Close() (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "Close", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "Close", "status", "started", "type", mw.typeStr) + return mw.next.Close() +} + +// ---- Metrics Middleware Domain ---- + +// databaseMetricsMiddleware wraps an implementation of Databases and on +// function call logs metrics about this instance. 
+type databaseMetricsMiddleware struct { + next Database + + typeStr string +} + +func (mw *databaseMetricsMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseMetricsMiddleware) CreateUser(statements Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "CreateUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "CreateUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1) + return mw.next.CreateUser(statements, usernamePrefix, expiration) +} + +func (mw *databaseMetricsMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "RenewUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "RenewUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1) + return mw.next.RenewUser(statements, username, expiration) +} + +func (mw *databaseMetricsMiddleware) RevokeUser(statements Statements, username string) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "RevokeUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "RevokeUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1) + return mw.next.RevokeUser(statements, username) +} + +func (mw *databaseMetricsMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Initialize"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "Initialize"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1) + return mw.next.Initialize(conf, verifyConnection) +} + +func (mw *databaseMetricsMiddleware) Close() (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Close"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "Close", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "Close"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1) + return mw.next.Close() +} diff --git a/builtin/logical/database/dbplugin/plugin.go 
b/builtin/logical/database/dbplugin/plugin.go
new file mode 100644
index 000000000..bc63594ae
--- /dev/null
+++ b/builtin/logical/database/dbplugin/plugin.go
@@ -0,0 +1,140 @@
+package dbplugin
+
+import (
+	"fmt"
+	"net/rpc"
+	"time"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/helper/pluginutil"
+	log "github.com/mgutz/logxi/v1"
+)
+
+// Database is the interface that all database objects must implement.
+type Database interface {
+	Type() (string, error)
+	CreateUser(statements Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error)
+	RenewUser(statements Statements, username string, expiration time.Time) error
+	RevokeUser(statements Statements, username string) error
+
+	Initialize(config map[string]interface{}, verifyConnection bool) error
+	Close() error
+}
+
+// Statements are set in role creation and passed into the database type's
+// functions.
+type Statements struct {
+	CreationStatements   string `json:"creation_statements" mapstructure:"creation_statements" structs:"creation_statements"`
+	RevocationStatements string `json:"revocation_statements" mapstructure:"revocation_statements" structs:"revocation_statements"`
+	RollbackStatements   string `json:"rollback_statements" mapstructure:"rollback_statements" structs:"rollback_statements"`
+	RenewStatements      string `json:"renew_statements" mapstructure:"renew_statements" structs:"renew_statements"`
+}
+
+// PluginFactory is used to build plugin database types. It wraps the database
+// object in logging and metrics middleware.
+func PluginFactory(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
+	// Look for plugin in the plugin catalog
+	pluginRunner, err := sys.LookupPlugin(pluginName)
+	if err != nil {
+		return nil, err
+	}
+
+	var db Database
+	if pluginRunner.Builtin {
+		// Plugin is builtin so we can retrieve an instance of the interface
+		// from the pluginRunner. Then cast it to a Database.
+		dbRaw, err := pluginRunner.BuiltinFactory()
+		if err != nil {
+			return nil, fmt.Errorf("error getting plugin type: %s", err)
+		}
+
+		var ok bool
+		db, ok = dbRaw.(Database)
+		if !ok {
+			return nil, fmt.Errorf("unsupported database type: %s", pluginName)
+		}
+
+	} else {
+		// create a DatabasePluginClient instance
+		db, err = newPluginClient(sys, pluginRunner)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	typeStr, err := db.Type()
+	if err != nil {
+		return nil, fmt.Errorf("error getting plugin type: %s", err)
+	}
+
+	// Wrap with metrics middleware
+	db = &databaseMetricsMiddleware{
+		next:    db,
+		typeStr: typeStr,
+	}
+
+	// Wrap with tracing middleware
+	if logger.IsTrace() {
+		db = &databaseTracingMiddleware{
+			next:    db,
+			typeStr: typeStr,
+			logger:  logger,
+		}
+	}
+
+	return db, nil
+}
+
+// handshakeConfig is used to do a basic handshake between a plugin and the
+// host. If the handshake fails, a user-friendly error is shown. This prevents
+// users from executing bad plugins or executing a plugin directly. It is a UX
+// feature, not a security feature.
+var handshakeConfig = plugin.HandshakeConfig{
+	ProtocolVersion:  1,
+	MagicCookieKey:   "VAULT_DATABASE_PLUGIN",
+	MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
+}
+
+// DatabasePlugin implements go-plugin's Plugin interface. It has methods for
+// retrieving a server and a client instance of the plugin.
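// For a standalone plugin binary, an illustrative main (myDatabase is a
// hypothetical type satisfying Database) would hand an implementation to
// Serve in server.go, mirroring the test helpers later in this diff:
//
//	func main() {
//		apiClientMeta := &pluginutil.APIClientMeta{}
//		flags := apiClientMeta.FlagSet()
//		flags.Parse(os.Args[1:])
//
//		dbplugin.Serve(&myDatabase{}, apiClientMeta.GetTLSConfig())
//	}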
+type DatabasePlugin struct { + impl Database +} + +func (d DatabasePlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return &databasePluginRPCServer{impl: d.impl}, nil +} + +func (DatabasePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &databasePluginRPCClient{client: c}, nil +} + +// ---- RPC Request Args Domain ---- + +type InitializeRequest struct { + Config map[string]interface{} + VerifyConnection bool +} + +type CreateUserRequest struct { + Statements Statements + UsernamePrefix string + Expiration time.Time +} + +type RenewUserRequest struct { + Statements Statements + Username string + Expiration time.Time +} + +type RevokeUserRequest struct { + Statements Statements + Username string +} + +// ---- RPC Response Args Domain ---- + +type CreateUserResponse struct { + Username string + Password string +} diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go new file mode 100644 index 000000000..c95e119e0 --- /dev/null +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -0,0 +1,248 @@ +package dbplugin_test + +import ( + "errors" + stdhttp "net/http" + "os" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/vault" + log "github.com/mgutz/logxi/v1" +) + +type mockPlugin struct { + users map[string][]string +} + +func (m *mockPlugin) Type() (string, error) { return "mock", nil } +func (m *mockPlugin) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + err = errors.New("err") + if usernamePrefix == "" || expiration.IsZero() { + return "", "", err + } + + if _, ok := m.users[usernamePrefix]; ok { + return "", "", err + } + + m.users[usernamePrefix] = []string{password} + + return usernamePrefix, "test", nil +} +func (m *mockPlugin) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + err := errors.New("err") + if username == "" || expiration.IsZero() { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + return nil +} +func (m *mockPlugin) RevokeUser(statements dbplugin.Statements, username string) error { + err := errors.New("err") + if username == "" { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + delete(m.users, username) + return nil +} +func (m *mockPlugin) Initialize(conf map[string]interface{}, _ bool) error { + err := errors.New("err") + if len(conf) != 1 { + return err + } + + return nil +} +func (m *mockPlugin) Close() error { + m.users = nil + return nil +} + +func getCore(t *testing.T) ([]*vault.TestClusterCore, logical.SystemView) { + coreConfig := &vault.CoreConfig{} + + handler1 := stdhttp.NewServeMux() + handler2 := stdhttp.NewServeMux() + handler3 := stdhttp.NewServeMux() + + // Chicken-and-egg: Handler needs a core. So we create handlers first, then + // add routes chained to a Handler-created handler. 
+ cores := vault.TestCluster(t, []stdhttp.Handler{handler1, handler2, handler3}, coreConfig, false) + handler1.Handle("/", http.Handler(cores[0].Core)) + handler2.Handle("/", http.Handler(cores[1].Core)) + handler3.Handle("/", http.Handler(cores[2].Core)) + + core := cores[0] + + sys := vault.TestDynamicSystemView(core.Core) + vault.TestAddTestPlugin(t, core.Core, "test-plugin", "TestPlugin_Main") + + return cores, sys +} + +// This is not an actual test case, it's a helper function that will be executed +// by the go-plugin client via an exec call. +func TestPlugin_Main(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + plugin := &mockPlugin{ + users: make(map[string][]string), + } + + args := []string{"--tls-skip-verify=true"} + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + plugins.Serve(plugin, apiClientMeta.GetTLSConfig()) +} + +func TestPlugin_Initialize(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + dbRaw, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + err = dbRaw.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = dbRaw.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_CreateUser(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + us, pw, err := db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + if us != "test" || pw != "test" { + t.Fatal("expected username and password to be 'test'") + } + + // try and save the same user again to verify it saved the first time, this + // should return an error + _, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("expected an error, user wasn't created correctly") + } +} + +func TestPlugin_RenewUser(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + us, _, err := db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = db.RenewUser(dbplugin.Statements{}, us, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_RevokeUser(t *testing.T) { + cores, sys := getCore(t) + for _, core := range cores { + defer core.CloseListeners() + } + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + err = db.Initialize(connectionDetails, true) + if err != nil { + 
t.Fatalf("err: %s", err) + } + + us, _, err := db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(dbplugin.Statements{}, us) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Try adding the same username back so we can verify it was removed + _, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/builtin/logical/database/dbplugin/server.go b/builtin/logical/database/dbplugin/server.go new file mode 100644 index 000000000..9546d092c --- /dev/null +++ b/builtin/logical/database/dbplugin/server.go @@ -0,0 +1,71 @@ +package dbplugin + +import ( + "crypto/tls" + + "github.com/hashicorp/go-plugin" +) + +// Serve is called from within a plugin and wraps the provided +// Database implementation in a databasePluginRPCServer object and starts a +// RPC server. +func Serve(db Database, tlsProvider func() (*tls.Config, error)) { + dbPlugin := &DatabasePlugin{ + impl: db, + } + + // pluginMap is the map of plugins we can dispense. + var pluginMap = map[string]plugin.Plugin{ + "database": dbPlugin, + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshakeConfig, + Plugins: pluginMap, + TLSProvider: tlsProvider, + }) +} + +// ---- RPC server domain ---- + +// databasePluginRPCServer implements an RPC version of Database and is run +// inside a plugin. It wraps an underlying implementation of Database. +type databasePluginRPCServer struct { + impl Database +} + +func (ds *databasePluginRPCServer) Type(_ struct{}, resp *string) error { + var err error + *resp, err = ds.impl.Type() + return err +} + +func (ds *databasePluginRPCServer) CreateUser(args *CreateUserRequest, resp *CreateUserResponse) error { + var err error + resp.Username, resp.Password, err = ds.impl.CreateUser(args.Statements, args.UsernamePrefix, args.Expiration) + + return err +} + +func (ds *databasePluginRPCServer) RenewUser(args *RenewUserRequest, _ *struct{}) error { + err := ds.impl.RenewUser(args.Statements, args.Username, args.Expiration) + + return err +} + +func (ds *databasePluginRPCServer) RevokeUser(args *RevokeUserRequest, _ *struct{}) error { + err := ds.impl.RevokeUser(args.Statements, args.Username) + + return err +} + +func (ds *databasePluginRPCServer) Initialize(args *InitializeRequest, _ *struct{}) error { + err := ds.impl.Initialize(args.Config, args.VerifyConnection) + + return err +} + +func (ds *databasePluginRPCServer) Close(_ struct{}, _ *struct{}) error { + ds.impl.Close() + return nil +} diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go new file mode 100644 index 000000000..e84212bb8 --- /dev/null +++ b/builtin/logical/database/path_config_connection.go @@ -0,0 +1,270 @@ +package database + +import ( + "errors" + "fmt" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +var ( + respErrEmptyPluginName = "empty plugin name" + respErrEmptyName = "empty name attribute given" +) + +// DatabaseConfig is used by the Factory function to configure a Database +// object. 
+type DatabaseConfig struct { + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` + // ConnectionDetails stores the database specific connection settings needed + // by each database type. + ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` + AllowedRoles []string `json:"allowed_roles" structs:"allowed_roles" mapstructure:"allowed_roles"` +} + +// pathResetConnection configures a path to reset a plugin. +func pathResetConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of this database connection", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionReset(), + }, + + HelpSynopsis: pathResetConnectionHelpSyn, + HelpDescription: pathResetConnectionHelpDesc, + } +} + +// pathConnectionReset resets a plugin by closing the existing instance and +// creating a new one. +func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + // Grab the mutex lock + b.Lock() + defer b.Unlock() + + // Close plugin and delete the entry in the connections cache. + b.clearConnection(name) + + // Execute plugin again, we don't need the object so throw away. + _, err := b.createDBObj(req.Storage, name) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +// pathConfigurePluginConnection returns a configured framework.Path setup to +// operate on plugins. +func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of this database connection", + }, + + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The name of a builtin or previously registered + plugin known to vault. This endpoint will create an instance of + that plugin type.`, + }, + + "verify_connection": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: true, + Description: `If true, the connection details are verified by + actually connecting to the database. Defaults to true.`, + }, + + "allowed_roles": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or array of the role names + allowed to get creds from this database connection. If empty no + roles are allowed. 
If "*" all roles are allowed.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.connectionWriteHandler(), + logical.ReadOperation: b.connectionReadHandler(), + logical.DeleteOperation: b.connectionDeleteHandler(), + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +// connectionReadHandler reads out the connection configuration +func (b *databaseBackend) connectionReadHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + entry, err := req.Storage.Get(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, errors.New("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + return &logical.Response{ + Data: structs.New(config).Map(), + }, nil + } +} + +// connectionDeleteHandler deletes the connection configuration +func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + err := req.Storage.Delete(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, errors.New("failed to delete connection configuration") + } + + b.Lock() + defer b.Unlock() + + if _, ok := b.connections[name]; ok { + err = b.connections[name].Close() + if err != nil { + return nil, err + } + + delete(b.connections, name) + } + + return nil, nil + } +} + +// connectionWriteHandler returns a handler function for creating and updating +// both builtin and plugin database types. +func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + pluginName := data.Get("plugin_name").(string) + if pluginName == "" { + return logical.ErrorResponse(respErrEmptyPluginName), nil + } + + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + verifyConnection := data.Get("verify_connection").(bool) + + allowedRoles := data.Get("allowed_roles").([]string) + + // Remove these entries from the data before we store it keyed under + // ConnectionDetails. 
+ delete(data.Raw, "name") + delete(data.Raw, "plugin_name") + delete(data.Raw, "allowed_roles") + delete(data.Raw, "verify_connection") + + config := &DatabaseConfig{ + ConnectionDetails: data.Raw, + PluginName: pluginName, + AllowedRoles: allowedRoles, + } + + db, err := dbplugin.PluginFactory(config.PluginName, b.System(), b.logger) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil + } + + err = db.Initialize(config.ConnectionDetails, verifyConnection) + if err != nil { + db.Close() + return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil + } + + // Grab the mutex lock + b.Lock() + defer b.Unlock() + + // Close and remove the old connection + b.clearConnection(name) + + // Save the new connection + b.connections[name] = db + + // Store it + entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.") + + return resp, nil + } +} + +const pathConfigConnectionHelpSyn = ` +Configure connection details to a database plugin. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection details used to connect to a particular +database. This path runs the provided plugin name and passes the configured +connection details to the plugin. See the documentation for the plugin specified +for a full list of accepted connection details. + +In addition to the database specific connection details, this endpoint also +accepts: + + * "plugin_name" (required) - The name of a builtin or previously registered + plugin known to vault. This endpoint will create an instance of that + plugin type. + + * "verify_connection" (default: true) - A boolean value denoting if the plugin should verify + it is able to connect to the database using the provided connection + details. +` + +const pathResetConnectionHelpSyn = ` +Resets a database plugin. +` + +const pathResetConnectionHelpDesc = ` +This path resets the database connection by closing the existing database plugin +instance and running a new one. 
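As an illustrative example (the connection name is a placeholder), an existing
connection can be reset with a forced, data-less write:

    vault write -f database/reset/postgresql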
diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go
new file mode 100644
index 000000000..7bc7dfa6f
--- /dev/null
+++ b/builtin/logical/database/path_creds_create.go
@@ -0,0 +1,106 @@
+package database
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/vault/helper/strutil"
+	"github.com/hashicorp/vault/logical"
+	"github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCredsCreate(b *databaseBackend) *framework.Path {
+	return &framework.Path{
+		Pattern: "creds/" + framework.GenericNameRegex("name"),
+		Fields: map[string]*framework.FieldSchema{
+			"name": &framework.FieldSchema{
+				Type:        framework.TypeString,
+				Description: "Name of the role.",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ReadOperation: b.pathCredsCreateRead(),
+		},
+
+		HelpSynopsis:    pathCredsCreateReadHelpSyn,
+		HelpDescription: pathCredsCreateReadHelpDesc,
+	}
+}
+
+func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		name := data.Get("name").(string)
+
+		// Get the role
+		role, err := b.Role(req.Storage, name)
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+		}
+
+		dbConfig, err := b.DatabaseConfig(req.Storage, role.DBName)
+		if err != nil {
+			return nil, err
+		}
+
+		// If role name isn't in the database's allowed roles, send back a
+		// permission denied.
+		if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContains(dbConfig.AllowedRoles, name) {
+			return nil, logical.ErrPermissionDenied
+		}
+
+		// Grab the read lock
+		b.RLock()
+		var unlockFunc func() = b.RUnlock
+
+		// Get the Database object
+		db, ok := b.getDBObj(role.DBName)
+		if !ok {
+			// Upgrade lock
+			b.RUnlock()
+			b.Lock()
+			unlockFunc = b.Unlock
+
+			// Create a new DB object
+			db, err = b.createDBObj(req.Storage, role.DBName)
+			if err != nil {
+				unlockFunc()
+				return nil, fmt.Errorf("could not retrieve db with name: %s, got error: %s", role.DBName, err)
+			}
+		}
+
+		expiration := time.Now().Add(role.DefaultTTL)
+
+		// Create the user
+		username, password, err := db.CreateUser(role.Statements, req.DisplayName, expiration)
+		// Unlock
+		unlockFunc()
+		if err != nil {
+			b.closeIfShutdown(role.DBName, err)
+			return nil, err
+		}
+
+		resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+			"username": username,
+			"password": password,
+		}, map[string]interface{}{
+			"username": username,
+			"role":     name,
+		})
+		resp.Secret.TTL = role.DefaultTTL
+		return resp, nil
+	}
+}
+
+const pathCredsCreateReadHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathCredsCreateReadHelpDesc = `
+This path reads database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
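Once a role referencing this connection exists, issuing credentials is a single read under creds/. A minimal sketch with the same assumed names ("database" mount, "readonly" role):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Each read runs the role's creation_statements against the database
    	// and returns a lease that follows the role's default_ttl.
    	secret, err := client.Logical().Read("database/creds/readonly")
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Println(secret.Data["username"], secret.Data["password"])
    	fmt.Println("lease:", secret.LeaseID)
    }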
diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go
new file mode 100644
index 000000000..8be33c0a1
--- /dev/null
+++ b/builtin/logical/database/path_roles.go
@@ -0,0 +1,233 @@
+package database
+
+import (
+	"time"
+
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/logical"
+	"github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *databaseBackend) *framework.Path {
+	return &framework.Path{
+		Pattern: "roles/?$",
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ListOperation: b.pathRoleList(),
+		},
+
+		HelpSynopsis:    pathRoleHelpSyn,
+		HelpDescription: pathRoleHelpDesc,
+	}
+}
+
+func pathRoles(b *databaseBackend) *framework.Path {
+	return &framework.Path{
+		Pattern: "roles/" + framework.GenericNameRegex("name"),
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the role.",
+			},
+
+			"db_name": {
+				Type:        framework.TypeString,
+				Description: "Name of the database this role acts on.",
+			},
+			"creation_statements": {
+				Type: framework.TypeString,
+				Description: `Statements to be executed to create a user. Must be a semicolon-separated
+string, a base64-encoded semicolon-separated string, a serialized JSON string
+array, or a base64-encoded serialized JSON string array. The '{{name}}',
+'{{password}}', and '{{expiration}}' values will be substituted.`,
+			},
+			"revocation_statements": {
+				Type: framework.TypeString,
+				Description: `Statements to be executed to revoke a user. Must be a semicolon-separated
+string, a base64-encoded semicolon-separated string, a serialized JSON string
+array, or a base64-encoded serialized JSON string array. The '{{name}}' value
+will be substituted.`,
+			},
+			"renew_statements": {
+				Type: framework.TypeString,
+				Description: `Statements to be executed to renew a user. Must be a semicolon-separated
+string, a base64-encoded semicolon-separated string, a serialized JSON string
+array, or a base64-encoded serialized JSON string array. The '{{name}}' value
+will be substituted.`,
+			},
+			"rollback_statements": {
+				Type: framework.TypeString,
+				Description: `Statements to be executed to roll back a user creation if an error occurs.
+Must be a semicolon-separated string, a base64-encoded semicolon-separated
+string, a serialized JSON string array, or a base64-encoded serialized JSON
+string array. The '{{name}}' value
+will be substituted.`,
+			},
+
+			"default_ttl": {
+				Type:        framework.TypeDurationSecond,
+				Description: "Default TTL for role.",
+			},
+
+			"max_ttl": {
+				Type:        framework.TypeDurationSecond,
+				Description: "Maximum time a credential is valid for.",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ReadOperation:   b.pathRoleRead(),
+			logical.UpdateOperation: b.pathRoleCreate(),
+			logical.DeleteOperation: b.pathRoleDelete(),
+		},
+
+		HelpSynopsis:    pathRoleHelpSyn,
+		HelpDescription: pathRoleHelpDesc,
+	}
+}
+
+func (b *databaseBackend) pathRoleDelete() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		err := req.Storage.Delete("role/" + data.Get("name").(string))
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	}
+}
+
+func (b *databaseBackend) pathRoleRead() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		role, err := b.Role(req.Storage, data.Get("name").(string))
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return nil, nil
+		}
+
+		return &logical.Response{
+			Data: map[string]interface{}{
+				"db_name":               role.DBName,
+				"creation_statements":   role.Statements.CreationStatements,
+				"revocation_statements": role.Statements.RevocationStatements,
+				"rollback_statements":   role.Statements.RollbackStatements,
+				"renew_statements":      role.Statements.RenewStatements,
+				"default_ttl":           role.DefaultTTL.Seconds(),
+				"max_ttl":               role.MaxTTL.Seconds(),
+			},
+		}, nil
+	}
+}
+
+func (b *databaseBackend) pathRoleList() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		entries, err := req.Storage.List("role/")
+		if err != nil {
+			return nil, err
+		}
+
+		return logical.ListResponse(entries), nil
+	}
+}
+
+func (b *databaseBackend) pathRoleCreate() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		name := data.Get("name").(string)
+		if name == "" {
+			return logical.ErrorResponse("empty role name attribute given"), nil
+		}
+
+		dbName := data.Get("db_name").(string)
+		if dbName == "" {
+			return logical.ErrorResponse("empty database name attribute given"), nil
+		}
+
+		// Get statements
+		creationStmts := data.Get("creation_statements").(string)
+		revocationStmts := data.Get("revocation_statements").(string)
+		rollbackStmts := data.Get("rollback_statements").(string)
+		renewStmts := data.Get("renew_statements").(string)
+
+		// Get TTLs
+		defaultTTLRaw := data.Get("default_ttl").(int)
+		maxTTLRaw := data.Get("max_ttl").(int)
+		defaultTTL := time.Duration(defaultTTLRaw) * time.Second
+		maxTTL := time.Duration(maxTTLRaw) * time.Second
+
+		statements := dbplugin.Statements{
+			CreationStatements:   creationStmts,
+			RevocationStatements: revocationStmts,
+			RollbackStatements:   rollbackStmts,
+			RenewStatements:      renewStmts,
+		}
+
+		// Store it
+		entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+			DBName:     dbName,
+			Statements: statements,
+			DefaultTTL: defaultTTL,
+			MaxTTL:     maxTTL,
+		})
+		if err != nil {
+			return nil, err
+		}
+		if err := req.Storage.Put(entry); err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	}
+}
+
+type roleEntry struct {
+	DBName     string              `json:"db_name" mapstructure:"db_name" structs:"db_name"`
+	Statements dbplugin.Statements `json:"statements" mapstructure:"statements" structs:"statements"`
+	DefaultTTL time.Duration       `json:"default_ttl" mapstructure:"default_ttl" structs:"default_ttl"`
+	MaxTTL     time.Duration       `json:"max_ttl" mapstructure:"max_ttl" structs:"max_ttl"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "db_name" parameter is required and configures the name of the database
+connection to use.
+
+The "creation_statements" parameter customizes the string used to create the
+credentials. This can be a sequence of SQL queries, or other statement formats
+for a particular database type. Some substitution will be done to the statement
+strings for certain keys. The names of the variables must be surrounded by "{{"
+and "}}" to be replaced.
+
+	* "name" - The random username generated for the DB user.
+
+	* "password" - The random password generated for the DB user.
+
+	* "expiration" - The timestamp when this user will expire.
+
+Example of a decent creation_statements for a postgresql database plugin:
+
+	CREATE ROLE "{{name}}" WITH
+	  LOGIN
+	  PASSWORD '{{password}}'
+	  VALID UNTIL '{{expiration}}';
+	GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+
+The "revocation_statements" parameter customizes the statement string used to
+revoke a user. Example of a decent revocation_statements for a postgresql
+database plugin:
+
+	REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+	REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+	REVOKE USAGE ON SCHEMA public FROM {{name}};
+	DROP ROLE IF EXISTS {{name}};
+
+The "renew_statements" parameter customizes the statement string used to renew a
+user.
+The "rollback_statements" parameter customizes the statement string used to
+roll back a change if needed.
+`
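Tying the two previous paths together, a minimal sketch of defining such a role over the API; the SQL mirrors the example statements in the help text above, and the "database" mount, "my-postgres" connection, and "readonly" role names are assumptions:

    package main

    import (
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}

    	creation := `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
    GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`

    	// default_ttl and max_ttl are TypeDurationSecond fields, so duration
    	// strings such as "1h" are accepted.
    	_, err = client.Logical().Write("database/roles/readonly", map[string]interface{}{
    		"db_name":             "my-postgres",
    		"creation_statements": creation,
    		"default_ttl":         "1h",
    		"max_ttl":             "24h",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }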
diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go
new file mode 100644
index 000000000..c3dfcb973
--- /dev/null
+++ b/builtin/logical/database/secret_creds.go
@@ -0,0 +1,139 @@
+package database
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/vault/logical"
+	"github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *databaseBackend) *framework.Secret {
+	return &framework.Secret{
+		Type:   SecretCredsType,
+		Fields: map[string]*framework.FieldSchema{},
+
+		Renew:  b.secretCredsRenew(),
+		Revoke: b.secretCredsRevoke(),
+	}
+}
+
+func (b *databaseBackend) secretCredsRenew() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		// Get the username from the internal data
+		usernameRaw, ok := req.Secret.InternalData["username"]
+		if !ok {
+			return nil, fmt.Errorf("secret is missing username internal data")
+		}
+		username, ok := usernameRaw.(string)
+		if !ok {
+			return nil, fmt.Errorf("username internal data is not a string")
+		}
+
+		roleNameRaw, ok := req.Secret.InternalData["role"]
+		if !ok {
+			return nil, fmt.Errorf("secret is missing role internal data")
+		}
+
+		role, err := b.Role(req.Storage, roleNameRaw.(string))
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return nil, fmt.Errorf("error during renew: could not find role with name %s", req.Secret.InternalData["role"])
+		}
+
+		f := framework.LeaseExtend(role.DefaultTTL, role.MaxTTL, b.System())
+		resp, err := f(req, data)
+		if err != nil {
+			return nil, err
+		}
+
+		// Grab the read lock
+		b.RLock()
+		var unlockFunc func() = b.RUnlock
+
+		// Get the Database object
+		db, ok := b.getDBObj(role.DBName)
+		if !ok {
+			// Upgrade lock
+			b.RUnlock()
+			b.Lock()
+			unlockFunc = b.Unlock
+
+			// Create a new DB object
+			db, err = b.createDBObj(req.Storage, role.DBName)
+			if err != nil {
+				unlockFunc()
+				return nil, fmt.Errorf("could not retrieve db with name: %s, got error: %s", role.DBName, err)
+			}
+		}
+
+		// Make sure we increase the VALID UNTIL expiration for this user.
+		if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() {
+			err := db.RenewUser(role.Statements, username, expireTime)
+			// Unlock
+			unlockFunc()
+			if err != nil {
+				b.closeIfShutdown(role.DBName, err)
+				return nil, err
+			}
+		} else {
+			// Nothing to renew on the database side; still release the lock.
+			unlockFunc()
+		}
+
+		return resp, nil
+	}
+}
+
+func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc {
+	return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		// Get the username from the internal data
+		usernameRaw, ok := req.Secret.InternalData["username"]
+		if !ok {
+			return nil, fmt.Errorf("secret is missing username internal data")
+		}
+		username, ok := usernameRaw.(string)
+		if !ok {
+			return nil, fmt.Errorf("username internal data is not a string")
+		}
+
+		var resp *logical.Response
+
+		roleNameRaw, ok := req.Secret.InternalData["role"]
+		if !ok {
+			return nil, fmt.Errorf("no role name was provided")
+		}
+
+		role, err := b.Role(req.Storage, roleNameRaw.(string))
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return nil, fmt.Errorf("error during revoke: could not find role with name %s", req.Secret.InternalData["role"])
+		}
+
+		// Grab the read lock
+		b.RLock()
+		var unlockFunc func() = b.RUnlock
+
+		// Get our connection
+		db, ok := b.getDBObj(role.DBName)
+		if !ok {
+			// Upgrade lock
+			b.RUnlock()
+			b.Lock()
+			unlockFunc = b.Unlock
+
+			// Create a new DB object
+			db, err = b.createDBObj(req.Storage, role.DBName)
+			if err != nil {
+				unlockFunc()
+				return nil, fmt.Errorf("could not retrieve db with name: %s, got error: %s", role.DBName, err)
+			}
+		}
+
+		err = db.RevokeUser(role.Statements, username)
+		// Unlock
+		unlockFunc()
+		if err != nil {
+			b.closeIfShutdown(role.DBName, err)
+			return nil, err
+		}
+
+		return resp, nil
+	}
+}
diff --git a/cli/commands.go b/cli/commands.go
index 72abe953a..0d1945f11 100644
--- a/cli/commands.go
+++ b/cli/commands.go
@@ -21,6 +21,7 @@ import (
 	"github.com/hashicorp/vault/builtin/logical/aws"
 	"github.com/hashicorp/vault/builtin/logical/cassandra"
 	"github.com/hashicorp/vault/builtin/logical/consul"
+	"github.com/hashicorp/vault/builtin/logical/database"
 	"github.com/hashicorp/vault/builtin/logical/mongodb"
 	"github.com/hashicorp/vault/builtin/logical/mssql"
 	"github.com/hashicorp/vault/builtin/logical/mysql"
@@ -92,6 +93,7 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
 				"mysql":    mysql.Factory,
 				"ssh":      ssh.Factory,
 				"rabbitmq": rabbitmq.Factory,
+				"database": database.Factory,
 				"totp":     totp.Factory,
 			},
 			ShutdownCh: command.MakeShutdownCh(),
diff --git a/command/server.go b/command/server.go
index c09db3de2..c5b952ce8 100644
--- a/command/server.go
+++ b/command/server.go
@@ -238,6 +238,7 @@ func (c *ServerCommand) Run(args []string) int {
 		DefaultLeaseTTL: config.DefaultLeaseTTL,
 		ClusterName:     config.ClusterName,
 		CacheSize:       config.CacheSize,
+		PluginDirectory: config.PluginDirectory,
 	}
 	if dev {
 		coreConfig.DevToken = devRootTokenID
diff --git a/command/server/config.go b/command/server/config.go
index e6ea123f4..dad485928 100644
--- a/command/server/config.go
+++ b/command/server/config.go
@@ -42,7 +42,8 @@ type Config struct {
 	DefaultLeaseTTL    time.Duration `hcl:"-"`
 	DefaultLeaseTTLRaw interface{}   `hcl:"default_lease_ttl"`
 
-	ClusterName string `hcl:"cluster_name"`
+	ClusterName     string `hcl:"cluster_name"`
+	PluginDirectory string `hcl:"plugin_directory"`
 }
 
 // DevConfig is a Config that is used for dev mode of Vault.
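The new plugin_directory option is plumbed from the HCL server configuration into the core config above, and merges like the other string settings (next hunk). A minimal sketch of exercising the setting through ParseConfig, whose signature appears below; the directory path and logger name are assumptions:

    package main

    import (
    	"fmt"

    	log "github.com/mgutz/logxi/v1"

    	"github.com/hashicorp/vault/command/server"
    )

    func main() {
    	hcl := `
    plugin_directory = "/etc/vault/plugins"
    cluster_name     = "primary"
    `
    	config, err := server.ParseConfig(hcl, log.New("example"))
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(config.PluginDirectory) // /etc/vault/plugins
    }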
@@ -272,6 +273,11 @@ func (c *Config) Merge(c2 *Config) *Config {
 		result.EnableUI = c2.EnableUI
 	}
 
+	result.PluginDirectory = c.PluginDirectory
+	if c2.PluginDirectory != "" {
+		result.PluginDirectory = c2.PluginDirectory
+	}
+
 	return result
 }
 
@@ -363,6 +369,7 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
 		"default_lease_ttl",
 		"max_lease_ttl",
 		"cluster_name",
+		"plugin_directory",
 	}
 	if err := checkHCLKeys(list, valid); err != nil {
 		return nil, err
diff --git a/helper/builtinplugins/builtin.go b/helper/builtinplugins/builtin.go
new file mode 100644
index 000000000..a2100e931
--- /dev/null
+++ b/helper/builtinplugins/builtin.go
@@ -0,0 +1,40 @@
+package builtinplugins
+
+import (
+	"github.com/hashicorp/vault/plugins/database/cassandra"
+	"github.com/hashicorp/vault/plugins/database/mssql"
+	"github.com/hashicorp/vault/plugins/database/mysql"
+	"github.com/hashicorp/vault/plugins/database/postgresql"
+)
+
+type BuiltinFactory func() (interface{}, error)
+
+var plugins map[string]BuiltinFactory = map[string]BuiltinFactory{
+	// These four plugins all use the same mysql implementation but with
+	// different username settings passed by the constructor.
+	"mysql-database-plugin":        mysql.New(mysql.DisplayNameLen, mysql.UsernameLen),
+	"mysql-aurora-database-plugin": mysql.New(mysql.LegacyDisplayNameLen, mysql.LegacyUsernameLen),
+	"mysql-rds-database-plugin":    mysql.New(mysql.LegacyDisplayNameLen, mysql.LegacyUsernameLen),
+	"mysql-legacy-database-plugin": mysql.New(mysql.LegacyDisplayNameLen, mysql.LegacyUsernameLen),
+
+	"postgresql-database-plugin": postgresql.New,
+	"mssql-database-plugin":      mssql.New,
+	"cassandra-database-plugin":  cassandra.New,
+}
+
+func Get(name string) (BuiltinFactory, bool) {
+	f, ok := plugins[name]
+	return f, ok
+}
+
+func Keys() []string {
+	keys := make([]string, len(plugins))
+
+	i := 0
+	for k := range plugins {
+		keys[i] = k
+		i++
+	}
+
+	return keys
+}
diff --git a/helper/pluginutil/mlock.go b/helper/pluginutil/mlock.go
new file mode 100644
index 000000000..dd9115a89
--- /dev/null
+++ b/helper/pluginutil/mlock.go
@@ -0,0 +1,23 @@
+package pluginutil
+
+import (
+	"os"
+
+	"github.com/hashicorp/vault/helper/mlock"
+)
+
+var (
+	// PluginMlockEnabled is the ENV name used to pass the configuration for
+	// enabling mlock
+	PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED"
+)
+
+// OptionallyEnableMlock determines if mlock should be called, and if so enables
+// mlock.
+func OptionallyEnableMlock() error {
+	if os.Getenv(PluginMlockEnabled) == "true" {
+		return mlock.LockMemory()
+	}
+
+	return nil
+}
diff --git a/helper/pluginutil/runner.go b/helper/pluginutil/runner.go
new file mode 100644
index 000000000..4b25ba16b
--- /dev/null
+++ b/helper/pluginutil/runner.go
@@ -0,0 +1,130 @@
+package pluginutil
+
+import (
+	"crypto/sha256"
+	"flag"
+	"fmt"
+	"os/exec"
+	"time"
+
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/helper/wrapping"
+)
+
+// Looker defines the plugin Lookup function that looks into the plugin catalog
+// for available plugins and returns a PluginRunner
+type Looker interface {
+	LookupPlugin(string) (*PluginRunner, error)
+}
+
+// RunnerUtil defines the functions needed by the runner to wrap the
+// metadata needed to run a plugin process. This includes looking up Mlock
+// configuration and wrapping data in a response wrapped token.
+type RunnerUtil interface {
+	ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
+	MlockEnabled() bool
+}
+
+// LookRunnerUtil defines the functions for both Looker and RunnerUtil
+type LookRunnerUtil interface {
+	Looker
+	RunnerUtil
+}
+
+// PluginRunner defines the metadata needed to run a plugin securely with
+// go-plugin.
+type PluginRunner struct {
+	Name           string                      `json:"name"`
+	Command        string                      `json:"command"`
+	Args           []string                    `json:"args"`
+	Sha256         []byte                      `json:"sha256"`
+	Builtin        bool                        `json:"builtin"`
+	BuiltinFactory func() (interface{}, error) `json:"-"`
+}
+
+// Run takes a wrapper instance, and the go-plugin parameters and executes a
+// plugin.
+func (r *PluginRunner) Run(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string) (*plugin.Client, error) {
+	// Get a CA TLS Certificate
+	certBytes, key, err := generateCert()
+	if err != nil {
+		return nil, err
+	}
+
+	// Use CA to sign a client cert and return a configured TLS config
+	clientTLSConfig, err := createClientTLSConfig(certBytes, key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use CA to sign a server cert and wrap the values in a response wrapped
+	// token.
+	wrapToken, err := wrapServerConfig(wrapper, certBytes, key)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := exec.Command(r.Command, r.Args...)
+	cmd.Env = append(cmd.Env, env...)
+	// Add the response wrap token to the ENV of the plugin
+	cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken))
+	// Add the mlock setting to the ENV of the plugin
+	if wrapper.MlockEnabled() {
+		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
+	}
+
+	secureConfig := &plugin.SecureConfig{
+		Checksum: r.Sha256,
+		Hash:     sha256.New(),
+	}
+
+	client := plugin.NewClient(&plugin.ClientConfig{
+		HandshakeConfig: hs,
+		Plugins:         pluginMap,
+		Cmd:             cmd,
+		TLSConfig:       clientTLSConfig,
+		SecureConfig:    secureConfig,
+	})
+
+	return client, nil
+}
+
+type APIClientMeta struct {
+	// These are set by the command line flags.
+ flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +func (f *APIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("tls settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +func (f *APIClientMeta) GetTLSConfig() *api.TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &api.TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} diff --git a/helper/pluginutil/tls.go b/helper/pluginutil/tls.go new file mode 100644 index 000000000..1a7fbe783 --- /dev/null +++ b/helper/pluginutil/tls.go @@ -0,0 +1,227 @@ +package pluginutil + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "errors" + "fmt" + "net/url" + "os" + "time" + + "github.com/SermoDigital/jose/jws" + "github.com/hashicorp/errwrap" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/certutil" +) + +var ( + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. +func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. 
+func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) {
+	clientCert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing generated plugin certificate: %v", err)
+	}
+
+	cert := tls.Certificate{
+		Certificate: [][]byte{certBytes},
+		PrivateKey:  key,
+		Leaf:        clientCert,
+	}
+
+	clientCertPool := x509.NewCertPool()
+	clientCertPool.AddCert(clientCert)
+
+	tlsConfig := &tls.Config{
+		Certificates: []tls.Certificate{cert},
+		RootCAs:      clientCertPool,
+		ServerName:   clientCert.Subject.CommonName,
+		MinVersion:   tls.VersionTLS12,
+	}
+
+	tlsConfig.BuildNameToCertificate()
+
+	return tlsConfig, nil
+}
+
+// wrapServerConfig is used to create a server certificate and private key, then
+// wrap them in an unwrap token for later retrieval by the plugin.
+func wrapServerConfig(sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) {
+	rawKey, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		return "", err
+	}
+
+	wrapInfo, err := sys.ResponseWrapData(map[string]interface{}{
+		"ServerCert": certBytes,
+		"ServerKey":  rawKey,
+	}, time.Second*10, true)
+	if err != nil {
+		return "", err
+	}
+
+	return wrapInfo.Token, nil
+}
+
+// VaultPluginTLSProvider is run inside a plugin and retrieves the response
+// wrapped TLS certificate from Vault. It returns a configured TLS Config.
+func VaultPluginTLSProvider(apiTLSConfig *api.TLSConfig) func() (*tls.Config, error) {
+	return func() (*tls.Config, error) {
+		unwrapToken := os.Getenv(PluginUnwrapTokenEnv)
+
+		// Parse the JWT and retrieve the vault address
+		wt, err := jws.ParseJWT([]byte(unwrapToken))
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("error decoding token: %s", err))
+		}
+		if wt == nil {
+			return nil, errors.New("nil decoded token")
+		}
+
+		addrRaw := wt.Claims().Get("addr")
+		if addrRaw == nil {
+			return nil, errors.New("decoded token does not contain primary cluster address")
+		}
+		vaultAddr, ok := addrRaw.(string)
+		if !ok {
+			return nil, errors.New("decoded token's address not valid")
+		}
+		if vaultAddr == "" {
+			return nil, errors.New(`no address for the vault found`)
+		}
+
+		// Sanity check the value
+		if _, err := url.Parse(vaultAddr); err != nil {
+			return nil, errors.New(fmt.Sprintf("error parsing the vault address: %s", err))
+		}
+
+		// Unwrap the token
+		clientConf := api.DefaultConfig()
+		clientConf.Address = vaultAddr
+		if apiTLSConfig != nil {
+			clientConf.ConfigureTLS(apiTLSConfig)
+		}
+		client, err := api.NewClient(clientConf)
+		if err != nil {
+			return nil, errwrap.Wrapf("error during api client creation: {{err}}", err)
+		}
+
+		secret, err := client.Logical().Unwrap(unwrapToken)
+		if err != nil {
+			return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err)
+		}
+		if secret == nil {
+			return nil, errors.New("error during token unwrap request: secret is nil")
+		}
+
+		// Retrieve and parse the server's certificate
+		serverCertBytesRaw, ok := secret.Data["ServerCert"].(string)
+		if !ok {
+			return nil, errors.New("error unmarshalling certificate")
+		}
+
+		serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw)
+		if err != nil {
+			return nil, fmt.Errorf("error decoding certificate: %v", err)
+		}
+
+		serverCert, err := x509.ParseCertificate(serverCertBytes)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing certificate: %v", err)
+		}
+
+		// Retrieve and parse the server's private key
+		serverKeyB64, ok := secret.Data["ServerKey"].(string)
+		if !ok {
+			return nil, errors.New("error unmarshalling key")
+		}
+
+		serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64)
+		if err != nil {
+			return nil, fmt.Errorf("error decoding key: %v", err)
+		}
+
+		serverKey, err := x509.ParseECPrivateKey(serverKeyRaw)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing key: %v", err)
+		}
+
+		// Add CA cert to the cert pool
+		caCertPool := x509.NewCertPool()
+		caCertPool.AddCert(serverCert)
+
+		// Build a certificate object out of the server's cert and private key.
+		cert := tls.Certificate{
+			Certificate: [][]byte{serverCertBytes},
+			PrivateKey:  serverKey,
+			Leaf:        serverCert,
+		}
+
+		// Setup TLS config
+		tlsConfig := &tls.Config{
+			ClientCAs:  caCertPool,
+			RootCAs:    caCertPool,
+			ClientAuth: tls.RequireAndVerifyClientCert,
+			// TLS 1.2 minimum
+			MinVersion:   tls.VersionTLS12,
+			Certificates: []tls.Certificate{cert},
+		}
+		tlsConfig.BuildNameToCertificate()
+
+		return tlsConfig, nil
+	}
+}
diff --git a/helper/strutil/strutil.go b/helper/strutil/strutil.go
index 7c7f64d3d..986928e0e 100644
--- a/helper/strutil/strutil.go
+++ b/helper/strutil/strutil.go
@@ -29,6 +29,19 @@ func StrListSubset(super, sub []string) bool {
 	return true
 }
 
+// Parses a list of strings, separated by sep, into a slice of strings.
+// The return slice will be sorted and will not contain duplicate or
+// empty items.
+func ParseDedupAndSortStrings(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	parsed := []string{}
+	if input == "" {
+		// Don't return nil
+		return parsed
+	}
+	return RemoveDuplicates(strings.Split(input, sep), false)
+}
+
 // Parses a comma separated list of strings into a slice of strings.
 // The return slice will be sorted and will not contain duplicate or
 // empty items. The values will be converted to lower case.
diff --git a/helper/wrapping/wrapinfo.go b/helper/wrapping/wrapinfo.go
new file mode 100644
index 000000000..a27219b8a
--- /dev/null
+++ b/helper/wrapping/wrapinfo.go
@@ -0,0 +1,23 @@
+package wrapping
+
+import "time"
+
+type ResponseWrapInfo struct {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+
+	// The token containing the wrapped response
+	Token string `json:"token" structs:"token" mapstructure:"token"`
+
+	// The creation time. This can be used with the TTL to figure out an
+	// expected expiration.
+	CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+
+	// If the contained response is the output of a token creation call, the
+	// created token's accessor will be accessible here
+	WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"`
+
+	// The format to use. This doesn't get returned, it's only internal.
+	Format string `json:"format" structs:"format" mapstructure:"format"`
+}
diff --git a/logical/response.go b/logical/response.go
index ee6bfe1e2..2a4646a2c 100644
--- a/logical/response.go
+++ b/logical/response.go
@@ -4,8 +4,8 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"time"
 
+	"github.com/hashicorp/vault/helper/wrapping"
 	"github.com/mitchellh/copystructure"
 )
 
@@ -28,26 +28,6 @@ const (
 	HTTPStatusCode = "http_status_code"
 )
 
-type ResponseWrapInfo struct {
-	// Setting to non-zero specifies that the response should be wrapped.
-	// Specifies the desired TTL of the wrapping token.
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` - - // The token containing the wrapped response - Token string `json:"token" structs:"token" mapstructure:"token"` - - // The creation time. This can be used with the TTL to figure out an - // expected expiration. - CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"cration_time"` - - // If the contained response is the output of a token creation call, the - // created token's accessor will be accessible here - WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"` - - // The format to use. This doesn't get returned, it's only internal. - Format string `json:"format" structs:"format" mapstructure:"format"` -} - // Response is a struct that stores the response of a request. // It is used to abstract the details of the higher level request protocol. type Response struct { @@ -78,7 +58,7 @@ type Response struct { warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"` // Information for wrapping the response in a cubbyhole - WrapInfo *ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` + WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` } func init() { @@ -123,7 +103,7 @@ func init() { if err != nil { return nil, fmt.Errorf("error copying WrapInfo: %v", err) } - ret.WrapInfo = retWrapInfo.(*ResponseWrapInfo) + ret.WrapInfo = retWrapInfo.(*wrapping.ResponseWrapInfo) } return &ret, nil diff --git a/logical/system_view.go b/logical/system_view.go index d769397df..64fc51c7b 100644 --- a/logical/system_view.go +++ b/logical/system_view.go @@ -1,9 +1,12 @@ package logical import ( + "errors" "time" "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/helper/wrapping" ) // SystemView exposes system configuration information in a safe way @@ -37,6 +40,18 @@ type SystemView interface { // ReplicationState indicates the state of cluster replication ReplicationState() consts.ReplicationState + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + + // LookupPlugin looks into the plugin catalog for a plugin with the given + // name. Returns a PluginRunner or an error if a plugin can not be found. + LookupPlugin(string) (*pluginutil.PluginRunner, error) + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. 
+	MlockEnabled() bool
 }
 
 type StaticSystemView struct {
@@ -46,6 +61,7 @@ type StaticSystemView struct {
 	TaintedVal          bool
 	CachingDisabledVal  bool
 	Primary             bool
+	EnableMlock         bool
 	ReplicationStateVal consts.ReplicationState
 }
 
@@ -72,3 +88,15 @@ func (d StaticSystemView) CachingDisabled() bool {
 func (d StaticSystemView) ReplicationState() consts.ReplicationState {
 	return d.ReplicationStateVal
 }
+
+func (d StaticSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+	return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
+}
+
+func (d StaticSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
+	return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
+}
+
+func (d StaticSystemView) MlockEnabled() bool {
+	return d.EnableMlock
+}
diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go
new file mode 100644
index 000000000..c70997897
--- /dev/null
+++ b/plugins/database/cassandra/cassandra-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/hashicorp/vault/helper/pluginutil"
+	"github.com/hashicorp/vault/plugins/database/cassandra"
+)
+
+func main() {
+	apiClientMeta := &pluginutil.APIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	flags.Parse(os.Args[1:])
+
+	err := cassandra.Run(apiClientMeta.GetTLSConfig())
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go
new file mode 100644
index 000000000..60e445ff6
--- /dev/null
+++ b/plugins/database/cassandra/cassandra.go
@@ -0,0 +1,169 @@
+package cassandra
+
+import (
+	"strings"
+	"time"
+
+	"github.com/gocql/gocql"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/helper/strutil"
+	"github.com/hashicorp/vault/plugins"
+	"github.com/hashicorp/vault/plugins/helper/database/connutil"
+	"github.com/hashicorp/vault/plugins/helper/database/credsutil"
+	"github.com/hashicorp/vault/plugins/helper/database/dbutil"
+)
+
+const (
+	defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
+	defaultUserDeletionCQL = `DROP USER '{{username}}';`
+	cassandraTypeName      = "cassandra"
+)
+
+// Cassandra is an implementation of Database interface
+type Cassandra struct {
+	connutil.ConnectionProducer
+	credsutil.CredentialsProducer
+}
+
+// New returns a new Cassandra instance
+func New() (interface{}, error) {
+	connProducer := &connutil.CassandraConnectionProducer{}
+	connProducer.Type = cassandraTypeName
+
+	credsProducer := &credsutil.CassandraCredentialsProducer{}
+
+	dbType := &Cassandra{
+		ConnectionProducer:  connProducer,
+		CredentialsProducer: credsProducer,
+	}
+
+	return dbType, nil
+}
+
+// Run instantiates a Cassandra object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+	dbType, err := New()
+	if err != nil {
+		return err
+	}
+
+	plugins.Serve(dbType.(*Cassandra), apiTLSConfig)
+
+	return nil
+}
+
+// Type returns the TypeName for this backend
+func (c *Cassandra) Type() (string, error) {
+	return cassandraTypeName, nil
+}
+
+func (c *Cassandra) getConnection() (*gocql.Session, error) {
+	session, err := c.Connection()
+	if err != nil {
+		return nil, err
+	}
+
+	return
session.(*gocql.Session), nil +} + +// CreateUser generates the username/password on the underlying Cassandra secret backend as instructed by +// the CreationStatement provided. +func (c *Cassandra) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + // Grab the lock + c.Lock() + defer c.Unlock() + + // Get the connection + session, err := c.getConnection() + if err != nil { + return "", "", err + } + + creationCQL := statements.CreationStatements + if creationCQL == "" { + creationCQL = defaultUserCreationCQL + } + rollbackCQL := statements.RollbackStatements + if rollbackCQL == "" { + rollbackCQL = defaultUserDeletionCQL + } + + username, err = c.GenerateUsername(usernamePrefix) + if err != nil { + return "", "", err + } + + password, err = c.GeneratePassword() + if err != nil { + return "", "", err + } + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(creationCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + err = session.Query(dbutil.QueryHelper(query, map[string]string{ + "username": username, + "password": password, + })).Exec() + if err != nil { + for _, query := range strutil.ParseArbitraryStringSlice(rollbackCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + session.Query(dbutil.QueryHelper(query, map[string]string{ + "username": username, + })).Exec() + } + return "", "", err + } + } + + return username, password, nil +} + +// RenewUser is not supported on Cassandra, so this is a no-op. +func (c *Cassandra) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + return nil +} + +// RevokeUser attempts to drop the specified user. 
+func (c *Cassandra) RevokeUser(statements dbplugin.Statements, username string) error {
+	// Grab the lock
+	c.Lock()
+	defer c.Unlock()
+
+	session, err := c.getConnection()
+	if err != nil {
+		return err
+	}
+
+	revocationCQL := statements.RevocationStatements
+	if revocationCQL == "" {
+		revocationCQL = defaultUserDeletionCQL
+	}
+
+	var result *multierror.Error
+	for _, query := range strutil.ParseArbitraryStringSlice(revocationCQL, ";") {
+		query = strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+
+		err := session.Query(dbutil.QueryHelper(query, map[string]string{
+			"username": username,
+		})).Exec()
+
+		result = multierror.Append(result, err)
+	}
+
+	return result.ErrorOrNil()
+}
diff --git a/plugins/database/cassandra/cassandra_test.go b/plugins/database/cassandra/cassandra_test.go
new file mode 100644
index 000000000..9e98ec48f
--- /dev/null
+++ b/plugins/database/cassandra/cassandra_test.go
@@ -0,0 +1,230 @@
+package cassandra
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/gocql/gocql"
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/plugins/helper/database/connutil"
+	dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+func prepareCassandraTestContainer(t *testing.T) (cleanup func(), retURL string) {
+	if os.Getenv("CASSANDRA_HOST") != "" {
+		return func() {}, os.Getenv("CASSANDRA_HOST")
+	}
+
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatalf("Failed to connect to docker: %s", err)
+	}
+
+	cwd, _ := os.Getwd()
+	cassandraMountPath := fmt.Sprintf("%s/test-fixtures/:/etc/cassandra/", cwd)
+
+	ro := &dockertest.RunOptions{
+		Repository: "cassandra",
+		Tag:        "latest",
+		Mounts:     []string{cassandraMountPath},
+	}
+	resource, err := pool.RunWithOptions(ro)
+	if err != nil {
+		t.Fatalf("Could not start local cassandra docker container: %s", err)
+	}
+
+	cleanup = func() {
+		err := pool.Purge(resource)
+		if err != nil {
+			t.Fatalf("Failed to cleanup local container: %s", err)
+		}
+	}
+
+	retURL = fmt.Sprintf("localhost:%s", resource.GetPort("9042/tcp"))
+	port, _ := strconv.Atoi(resource.GetPort("9042/tcp"))
+
+	// exponential backoff-retry
+	if err = pool.Retry(func() error {
+		clusterConfig := gocql.NewCluster(retURL)
+		clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+			Username: "cassandra",
+			Password: "cassandra",
+		}
+		clusterConfig.ProtoVersion = 4
+		clusterConfig.Port = port
+
+		session, err := clusterConfig.CreateSession()
+		if err != nil {
+			return fmt.Errorf("error creating session: %s", err)
+		}
+		defer session.Close()
+		return nil
+	}); err != nil {
+		t.Fatalf("Could not connect to cassandra docker container: %s", err)
+	}
+	return
+}
+
+func TestCassandra_Initialize(t *testing.T) {
+	cleanup, connURL := prepareCassandraTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"hosts":            connURL,
+		"username":         "cassandra",
+		"password":         "cassandra",
+		"protocol_version": 4,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*Cassandra)
+	connProducer := db.ConnectionProducer.(*connutil.CassandraConnectionProducer)
+
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if !connProducer.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+
+	err = db.Close()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestCassandra_CreateUser(t *testing.T) {
+	cleanup, connURL := prepareCassandraTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"hosts":            connURL,
+		"username":         "cassandra",
+		"password":         "cassandra",
+		"protocol_version": 4,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*Cassandra)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testCassandraRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+}
+
+func TestCassandra_RenewUser(t *testing.T) {
+	cleanup, connURL := prepareCassandraTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"hosts":            connURL,
+		"username":         "cassandra",
+		"password":         "cassandra",
+		"protocol_version": 4,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*Cassandra)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testCassandraRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestCassandra_RevokeUser(t *testing.T) {
+	cleanup, connURL := prepareCassandraTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"hosts":            connURL,
+		"username":         "cassandra",
+		"password":         "cassandra",
+		"protocol_version": 4,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*Cassandra)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testCassandraRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	// Test default revoke statements
+	err = db.RevokeUser(statements, username)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err == nil {
+		t.Fatal("Credentials were not revoked")
+	}
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+	clusterConfig := gocql.NewCluster(connURL)
+	clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+		Username: username,
+		Password: password,
+	}
+	clusterConfig.ProtoVersion = 4
+
+	session, err := clusterConfig.CreateSession()
+	if err != nil {
+		return fmt.Errorf("error creating session: %s", err)
+	}
+	defer session.Close()
+	return nil
+}
+
+const testCassandraRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
+GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};`
diff --git a/plugins/database/cassandra/test-fixtures/cassandra.yaml b/plugins/database/cassandra/test-fixtures/cassandra.yaml
new file mode 100644
index 000000000..54f47d34a
--- /dev/null
+++ b/plugins/database/cassandra/test-fixtures/cassandra.yaml
@@ -0,0 +1,1146 @@
+# Cassandra storage config YAML
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full
explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. 
+#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: CassandraAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. 
+# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. +# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! 
+#
+# ignore
+#   ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Policy for commit disk failures:
+#
+# die
+#   shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+#   shut down gossip and Thrift, leaving the node effectively dead, but
+#   can still be inspected via JMX.
+#
+# stop_commit
+#   shutdown the commit log, letting writes collect but
+#   continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+#   ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the native protocol prepared statement cache
+#
+# Valid values are either "auto" (omitting the value) or a value greater than 0.
+#
+# Note that specifying too large a value will result in long running GCs and possibly
+# out-of-memory errors. Keep the value at a small fraction of the heap.
+#
+# If you constantly see "prepared statements discarded in the last minute because
+# cache limit reached" messages, the first step is to investigate the root cause
+# of these messages and check whether prepared statements are used correctly -
+# i.e. use bind markers for variable parts.
+#
+# Only change the default value if you really have more prepared statements than
+# fit in the cache. In most cases it is not necessary to change this value.
+# Constantly re-preparing statements is a performance penalty.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+prepared_statements_cache_size_mb:
+
+# Maximum size of the Thrift prepared statement cache
+#
+# If you do not use Thrift at all, it is safe to leave this value at "auto".
+#
+# See description of 'prepared_statements_cache_size_mb' above for more information.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+thrift_prepared_statements_cache_size_mb:
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+#   Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+#   This is the row cache implementation available
+#   in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
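+#
+# As a worked example of the sizing relationship described below (the
+# numbers simply restate the defaults): with 32 MB segments, the default
+# max_mutation_size_in_kb is 32 * 1024 / 2 = 16384 KB, so any single
+# mutation larger than 16 MB will be rejected.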
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+# Max mutation size is also configurable via max_mutation_size_in_kb setting in
+# cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
+#
+# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
+# be set to at least twice the size of max_mutation_size_in_kb / 1024
+#
+commitlog_segment_size_in_mb: 32
+
+# Compression to apply to the commit log. If omitted, the commit log
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+# commitlog_compression:
+#   - class_name: LZ4Compressor
+#     parameters:
+#         -
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring. You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "172.17.0.2"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+# For example, a node with 2 data drives and 4 cores works out to
+# concurrent_reads: 32 and concurrent_writes: 32, matching the values below.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+
+# For materialized view writes, as there is a read involved, this should
+# be limited by the lesser of concurrent reads or concurrent writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this are reserved for pooling buffers, the rest is used as a
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+
+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables.
Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. A larger
+# memtable_cleanup_threshold means larger flushes and hence less compaction,
+# but also less concurrent flush activity, which can make it difficult to
+# keep your disks fed under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+#
+# heap_buffers
+#   on heap nio buffers
+#
+# offheap_buffers
+#   off heap (direct) nio buffers
+#
+# offheap_objects
+#   off heap objects
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commit logs on disk.
+#
+# If space gets above this value, Cassandra will flush every dirty CF
+# in the oldest segment and remove it. So a small total commitlog space
+# will tend to cause more flush activity on less-active columnfamilies.
+#
+# The default value is the smaller of 8192, and 1/4 of the total space
+# of the commitlog volume.
+#
+# commitlog_total_space_in_mb: 8192
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked.
+#
+# memtable_flush_writers defaults to one per data_file_directory.
+#
+# If your data directories are backed by SSD, you can increase this, but
+# avoid having memtable_flush_writers * data_file_directories > number of cores
+#memtable_flush_writers: 1
+
+# Total space to use for change-data-capture logs on disk.
+#
+# If space gets above this value, Cassandra will throw WriteTimeoutException
+# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
+# for parsing the raw CDC logs and deleting them when parsing is completed.
+#
+# The default value is the min of 4096 mb and 1/8th of the total space
+# of the drive where cdc_raw_directory resides.
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Defaults to 250ms
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers.
Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +ssl_storage_port: 7001 + +# Address or interface to bind to and tell other Cassandra nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# Set listen_address OR listen_interface, not both. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +listen_address: 172.17.0.2 + +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# listen_interface: eth0 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +broadcast_address: 172.17.0.2 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. 
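+#
+# For example (a hypothetical layout), serving TLS clients on a dedicated
+# port alongside the plaintext one:
+#   native_transport_port: 9042
+#   native_transport_port_ssl: 9142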
+# native_transport_port_ssl: 9142
+# The maximum number of threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frames (requests) larger than this will
+# be rejected as invalid. The default is 256MB. If you're changing this parameter,
+# you may want to adjust max_value_size_in_mb accordingly.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: false
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+rpc_address: 0.0.0.0
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+broadcast_rpc_address: 172.17.0.2
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync
+#   One thread per thrift connection. For a very large number of clients, memory
+#   will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+#   per thread, and that will correspond to your use of virtual memory (but physical memory
+#   may be limited depending on use of stack space).
+#
+# hsha
+#   Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#   asynchronously using a small number of threads that do not vary with the number
+#   of thrift clients (and thus scale well to many clients). The rpc requests are still
+#   synchronous (one thread per active request). If hsha is selected then it is essential
+#   that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
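+#
+# For example (a hypothetical illustration, not a recommendation), a capped
+# hsha configuration following the advice above might look like:
+#   rpc_server_type: hsha
+#   rpc_max_threads: 2048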
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_threads to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+#   and looking up rows within the partition by collation column
+#   is faster
+# - but, Cassandra will keep the collation index in memory for hot
+#   rows (as part of the key cache), so a larger granularity means
+#   you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
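+#
+# As a rough worked example (hypothetical numbers): with the default
+# column_index_size_in_kb: 64, a 1 GB partition produces on the order of
+# 1048576 / 64 = 16384 collation index entries, which is why very wide
+# partitions may warrant a coarser granularity.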
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts.
If disabled, replicas will assume that requests +# were forwarded to them instantly by the coordinator, which means that +# under overload conditions we will waste that much extra time processing +# already-timed-out requests. +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Set socket timeout for streaming operation. +# The stream session is failed if no data/ack is received by any of the participants +# within that period, which means this should also be sufficient to stream a large +# sstable or rebuild table indexes. +# Default value is 86400000ms, which means stale streams timeout after 24 hours. +# A value of zero means stream sockets should never time out. +# streaming_socket_timeout_in_ms: 86400000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH +# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. +# This means that if you start with the default SimpleSnitch, which +# locates every node on "rack1" in "datacenter1", your only options +# if you need to add another datacenter are GossipingPropertyFileSnitch +# (and the older PFS). From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) 
+# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. 
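+#
+# As a concrete illustration (hypothetical alias, reusing the default paths
+# and passwords from this file; see the JSSE reference linked below for the
+# full procedure), a node keystore can be generated with:
+#
+#   keytool -genkeypair -keyalg RSA -alias node1 \
+#       -keystore conf/.keystore -storepass cassandra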
+# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + # If enabled and optional is set to true encrypted and unencrypted connections are handled. + optional: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# Can be: +# +# all +# all traffic is compressed +# +# dc +# traffic between different datacenters is compressed +# +# none +# nothing is compressed. +internode_compression: dc + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +# This threshold can be adjusted to minimize logging if necessary +# gc_log_threshold_in_ms: 200 + +# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at +# INFO level +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled, if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect, if enable_user_defined_functions is false. 
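+#
+# For example (hypothetical, and only meaningful once both UDF options
+# below are enabled), a scripted UDF is declared in CQL like:
+#
+#   CREATE FUNCTION maxof(x int, y int)
+#     RETURNS NULL ON NULL INPUT
+#     RETURNS int
+#     LANGUAGE javascript
+#     AS 'Math.max(x, y)';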
+enable_scripted_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+    enabled: false
+    chunk_length_kb: 64
+    cipher: AES/CBC/PKCS5Padding
+    key_alias: testing:1
+    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+    # iv_length: 16
+    key_provider:
+      - class_name: org.apache.cassandra.security.JKSKeyProvider
+        parameters:
+          - keystore: conf/.keystore
+            keystore_password: cassandra
+            store_type: JCEKS
+            key_password: cassandra
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span across more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early.
Any value size larger than this threshold will result into marking an SSTable +# as corrupted. +# max_value_size_in_mb: 256 diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go new file mode 100644 index 000000000..5f05c5dff --- /dev/null +++ b/plugins/database/mssql/mssql-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mssql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args) + + err := mssql.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go new file mode 100644 index 000000000..9b22aa87c --- /dev/null +++ b/plugins/database/mssql/mssql.go @@ -0,0 +1,318 @@ +package mssql + +import ( + "database/sql" + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const msSQLTypeName = "mssql" + +// MSSQL is an implementation of Database interface +type MSSQL struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +func New() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = msSQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 20, + UsernameLen: 128, + } + + dbType := &MSSQL{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil +} + +// Run instantiates a MSSQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*MSSQL), apiTLSConfig) + + return nil +} + +// Type returns the TypeName for this backend +func (m *MSSQL) Type() (string, error) { + return msSQLTypeName, nil +} + +func (m *MSSQL) getConnection() (*sql.DB, error) { + db, err := m.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +// CreateUser generates the username/password on the underlying MSSQL secret backend as instructed by +// the CreationStatement provided. 
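+// For example, a creation statement template (mirroring the testMSSQLRole
+// fixture in mssql_test.go below) might be:
+//
+//	CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
+//	CREATE USER [{{name}}] FOR LOGIN [{{name}}];
+//
+// dbutil.QueryHelper expands the {{name}}, {{password}} and {{expiration}}
+// placeholders before each statement is prepared and executed.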
+func (m *MSSQL) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + // Grab the lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection() + if err != nil { + return "", "", err + } + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + username, err = m.GenerateUsername(usernamePrefix) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := m.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + })) + if err != nil { + return "", "", err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return "", "", err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + +// RenewUser is not supported on MSSQL, so this is a no-op. +func (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + return nil +} + +// RevokeUser attempts to drop the specified user. It will first attempt to disable login, +// then kill pending connections from that user, and finally drop the user and login from the +// database instance. +func (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error { + if statements.RevocationStatements == "" { + return m.revokeUserDefault(username) + } + + // Get connection + db, err := m.getConnection() + if err != nil { + return err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + })) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (m *MSSQL) revokeUserDefault(username string) error { + // Get connection + db, err := m.getConnection() + if err != nil { + return err + } + + // First disable server login + disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username)) + if err != nil { + return err + } + defer disableStmt.Close() + if _, err := disableStmt.Exec(); err != nil { + return err + } + + // Query for sessions for the login so that we can kill any outstanding + // sessions. 
There cannot be any active sessions before we drop the logins + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + sessionStmt, err := db.Prepare(fmt.Sprintf( + "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username)) + if err != nil { + return err + } + defer sessionStmt.Close() + + sessionRows, err := sessionStmt.Query() + if err != nil { + return err + } + defer sessionRows.Close() + + var revokeStmts []string + for sessionRows.Next() { + var sessionID int + err = sessionRows.Scan(&sessionID) + if err != nil { + return err + } + revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID)) + } + + // Query for database users using undocumented stored procedure for now since + // it is the easiest way to get this information; + // we need to drop the database users before we can drop the login and the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username)) + if err != nil { + return err + } + defer stmt.Close() + + rows, err := stmt.Query() + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var loginName, dbName, qUsername string + var aliasName sql.NullString + err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName) + if err != nil { + return err + } + revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username)) + } + + // we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revokeStmts { + stmt, err := db.Prepare(query) + if err != nil { + lastStmtError = err + continue + } + defer stmt.Close() + _, err = stmt.Exec() + if err != nil { + lastStmtError = err + } + } + + // can't drop if not all database users are dropped + if rows.Err() != nil { + return fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err()) + } + if lastStmtError != nil { + return fmt.Errorf("could not perform all sql statements: %s", lastStmtError) + } + + // Drop this login + stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username)) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + + return nil +} + +const dropUserSQL = ` +USE [%s] +IF EXISTS + (SELECT name + FROM sys.database_principals + WHERE name = N'%s') +BEGIN + DROP USER [%s] +END +` + +const dropLoginSQL = ` +IF EXISTS + (SELECT name + FROM master.sys.server_principals + WHERE name = N'%s') +BEGIN + DROP LOGIN [%s] +END +` diff --git a/plugins/database/mssql/mssql_test.go b/plugins/database/mssql/mssql_test.go new file mode 100644 index 000000000..830e38abb --- /dev/null +++ b/plugins/database/mssql/mssql_test.go @@ -0,0 +1,167 @@ +package mssql + +import ( + "database/sql" + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" +) + +var ( + testMSQLImagePull sync.Once +) + +func TestMSSQL_Initialize(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + + err := db.Initialize(connectionDetails, true) + if 
err != nil { + t.Fatalf("err: %s", err) + } + + connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer) + if !connProducer.Initialized { + t.Fatal("Database should be initalized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestMSSQL_CreateUser(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testMSSQLRole, + } + + username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMSSQL_RevokeUser(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testMSSQLRole, + } + + username, password, err := db.CreateUser(statements, "test", time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + + username, password, err = db.CreateUser(statements, "test", time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test custom revoke statememt + statements.RevocationStatements = testMSSQLDrop + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + parts := strings.Split(connURL, "@") + connURL = fmt.Sprintf("sqlserver://%s:%s@%s", username, password, parts[1]) + db, err := sql.Open("mssql", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const testMSSQLRole = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +CREATE USER [{{name}}] FOR LOGIN [{{name}}]; +GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [{{name}}];` + +const testMSSQLDrop = ` +DROP USER [{{name}}]; +DROP LOGIN [{{name}}]; +` diff --git a/plugins/database/mysql/mysql-database-plugin/main.go 
b/plugins/database/mysql/mysql-database-plugin/main.go new file mode 100644 index 000000000..249e5afee --- /dev/null +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mysql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args) + + err := mysql.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go new file mode 100644 index 000000000..b875af520 --- /dev/null +++ b/plugins/database/mysql/mysql.go @@ -0,0 +1,201 @@ +package mysql + +import ( + "database/sql" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const ( + defaultMysqlRevocationStmts = ` + REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; + DROP USER '{{name}}'@'%' + ` + mySQLTypeName = "mysql" +) + +var ( + DisplayNameLen int = 10 + LegacyDisplayNameLen int = 4 + UsernameLen int = 32 + LegacyUsernameLen int = 16 +) + +type MySQL struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +// New implements builtinplugins.BuiltinFactory +func New(displayLen, usernameLen int) func() (interface{}, error) { + return func() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = mySQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: displayLen, + UsernameLen: usernameLen, + } + + dbType := &MySQL{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil + } +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + f := New(DisplayNameLen, UsernameLen) + dbType, err := f() + if err != nil { + return err + } + + plugins.Serve(dbType.(*MySQL), apiTLSConfig) + + return nil +} + +func (m *MySQL) Type() (string, error) { + return mySQLTypeName, nil +} + +func (m *MySQL) getConnection() (*sql.DB, error) { + db, err := m.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (m *MySQL) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) { + // Grab the lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection() + if err != nil { + return "", "", err + } + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + username, err = m.GenerateUsername(usernamePrefix) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := m.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = 
strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+
+		stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+			"name":       username,
+			"password":   password,
+			"expiration": expirationStr,
+		}))
+		if err != nil {
+			return "", "", err
+		}
+		defer stmt.Close()
+		if _, err := stmt.Exec(); err != nil {
+			return "", "", err
+		}
+	}
+
+	// Commit the transaction
+	if err := tx.Commit(); err != nil {
+		return "", "", err
+	}
+
+	return username, password, nil
+}
+
+// NOOP
+func (m *MySQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+	return nil
+}
+
+func (m *MySQL) RevokeUser(statements dbplugin.Statements, username string) error {
+	// Grab the lock
+	m.Lock()
+	defer m.Unlock()
+
+	// Get the connection
+	db, err := m.getConnection()
+	if err != nil {
+		return err
+	}
+
+	revocationStmts := statements.RevocationStatements
+	// Use a default SQL statement for revocation if one cannot be fetched from the role
+	if revocationStmts == "" {
+		revocationStmts = defaultMysqlRevocationStmts
+	}
+
+	// Start a transaction
+	tx, err := db.Begin()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
+		query = strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+
+		// This is not a prepared statement because not all commands are supported
+		// 1295: This command is not supported in the prepared statement protocol yet
+		// Reference https://mariadb.com/kb/en/mariadb/prepare-statement/
+		query = strings.Replace(query, "{{name}}", username, -1)
+		_, err = tx.Exec(query)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Commit the transaction
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go
new file mode 100644
index 000000000..72dbd8156
--- /dev/null
+++ b/plugins/database/mysql/mysql_test.go
@@ -0,0 +1,206 @@
+package mysql
+
+import (
+	"database/sql"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/plugins/helper/database/connutil"
+	dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+var (
+	testMySQLImagePull sync.Once
+)
+
+func prepareMySQLTestContainer(t *testing.T) (cleanup func(), retURL string) {
+	if os.Getenv("MYSQL_URL") != "" {
+		return func() {}, os.Getenv("MYSQL_URL")
+	}
+
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatalf("Failed to connect to docker: %s", err)
+	}
+
+	resource, err := pool.Run("mysql", "latest", []string{"MYSQL_ROOT_PASSWORD=secret"})
+	if err != nil {
+		t.Fatalf("Could not start local MySQL docker container: %s", err)
+	}
+
+	cleanup = func() {
+		err := pool.Purge(resource)
+		if err != nil {
+			t.Fatalf("Failed to cleanup local container: %s", err)
+		}
+	}
+
+	retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
+
+	// exponential backoff-retry
+	if err = pool.Retry(func() error {
+		var err error
+		var db *sql.DB
+		db, err = sql.Open("mysql", retURL)
+		if err != nil {
+			return err
+		}
+		return db.Ping()
+	}); err != nil {
+		t.Fatalf("Could not connect to MySQL docker container: %s", err)
+	}
+
+	return
+}
+
+func TestMySQL_Initialize(t *testing.T) {
+	cleanup, connURL := prepareMySQLTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
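+	// The default name lengths are used here; older MySQL versions that cap
+	// user names at 16 characters could instead use the legacy sizes, e.g.
+	// New(LegacyDisplayNameLen, LegacyUsernameLen).
+	f := New(DisplayNameLen,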
UsernameLen)
+	dbRaw, _ := f()
+	db := dbRaw.(*MySQL)
+	connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if !connProducer.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+
+	err = db.Close()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestMySQL_CreateUser(t *testing.T) {
+	cleanup, connURL := prepareMySQLTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	f := New(DisplayNameLen, UsernameLen)
+	dbRaw, _ := f()
+	db := dbRaw.(*MySQL)
+
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Test with no configured Creation Statement
+	_, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute))
+	if err == nil {
+		t.Fatal("Expected error when no creation statement is provided")
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testMySQLRoleWildCard,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+}
+
+func TestMySQL_RevokeUser(t *testing.T) {
+	cleanup, connURL := prepareMySQLTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	f := New(DisplayNameLen, UsernameLen)
+	dbRaw, _ := f()
+	db := dbRaw.(*MySQL)
+
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testMySQLRoleWildCard,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	// Test default revoke statements
+	err = db.RevokeUser(statements, username)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err == nil {
+		t.Fatal("Credentials were not revoked")
+	}
+
+	statements.CreationStatements = testMySQLRoleWildCard
+	username, password, err = db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	// Test custom revoke statements
+	statements.RevocationStatements = testMySQLRevocationSQL
+	err = db.RevokeUser(statements, username)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err == nil {
+		t.Fatal("Credentials were not revoked")
+	}
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+	// Log in with the new creds
+	connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1)
+	db, err := sql.Open("mysql", connURL)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+	return db.Ping()
+}
+
+const testMySQLRoleWildCard = `
+CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
+GRANT SELECT ON *.* TO '{{name}}'@'%';
+`
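+// A stricter variant (illustrative only, not part of this change) would grant
+// access from a known host rather than the '%' wildcard:
+//
+//	CREATE USER '{{name}}'@'localhost' IDENTIFIED BY '{{password}}';
+//	GRANT SELECT ON *.* TO '{{name}}'@'localhost';
+
+const testMySQLRevocationSQL = `
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM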
'{{name}}'@'%';
+DROP USER '{{name}}'@'%';
+`
diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go
new file mode 100644
index 000000000..ac3cf95a7
--- /dev/null
+++ b/plugins/database/postgresql/postgresql-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/hashicorp/vault/helper/pluginutil"
+	"github.com/hashicorp/vault/plugins/database/postgresql"
+)
+
+func main() {
+	apiClientMeta := &pluginutil.APIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	// As in the MySQL plugin, skip os.Args[0] so the flags actually parse.
+	flags.Parse(os.Args[1:])
+
+	err := postgresql.Run(apiClientMeta.GetTLSConfig())
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go
new file mode 100644
index 000000000..d60ef8bbe
--- /dev/null
+++ b/plugins/database/postgresql/postgresql.go
@@ -0,0 +1,343 @@
+package postgresql
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/helper/strutil"
+	"github.com/hashicorp/vault/plugins"
+	"github.com/hashicorp/vault/plugins/helper/database/connutil"
+	"github.com/hashicorp/vault/plugins/helper/database/credsutil"
+	"github.com/hashicorp/vault/plugins/helper/database/dbutil"
+	"github.com/lib/pq"
+)
+
+const postgreSQLTypeName string = "postgres"
+
+// New implements builtinplugins.BuiltinFactory
+func New() (interface{}, error) {
+	connProducer := &connutil.SQLConnectionProducer{}
+	connProducer.Type = postgreSQLTypeName
+
+	credsProducer := &credsutil.SQLCredentialsProducer{
+		DisplayNameLen: 10,
+		UsernameLen:    63,
+	}
+
+	dbType := &PostgreSQL{
+		ConnectionProducer:  connProducer,
+		CredentialsProducer: credsProducer,
+	}
+
+	return dbType, nil
+}
+
+// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+	dbType, err := New()
+	if err != nil {
+		return err
+	}
+
+	plugins.Serve(dbType.(*PostgreSQL), apiTLSConfig)
+
+	return nil
+}
+
+type PostgreSQL struct {
+	connutil.ConnectionProducer
+	credsutil.CredentialsProducer
+}
+
+func (p *PostgreSQL) Type() (string, error) {
+	return postgreSQLTypeName, nil
+}
+
+func (p *PostgreSQL) getConnection() (*sql.DB, error) {
+	db, err := p.Connection()
+	if err != nil {
+		return nil, err
+	}
+
+	return db.(*sql.DB), nil
+}
+
+func (p *PostgreSQL) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) {
+	if statements.CreationStatements == "" {
+		return "", "", dbutil.ErrEmptyCreationStatement
+	}
+
+	// Grab the lock
+	p.Lock()
+	defer p.Unlock()
+
+	username, err = p.GenerateUsername(usernamePrefix)
+	if err != nil {
+		return "", "", err
+	}
+
+	password, err = p.GeneratePassword()
+	if err != nil {
+		return "", "", err
+	}
+
+	expirationStr, err := p.GenerateExpiration(expiration)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Get the connection
+	db, err := p.getConnection()
+	if err != nil {
+		return "", "", err
+	}
+
+	// Start a transaction
+	tx, err := db.Begin()
+	if err != nil {
+		return "", "", err
+	}
+	defer func() {
+		tx.Rollback()
+	}()
+
+	// Execute each query
+	for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
+		query = strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+
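+		// Each statement is prepared and executed individually; the {{name}},
+		// {{password}} and {{expiration}} placeholders were already templated
+		// in by QueryHelper, so Exec takes no bind arguments.
+		stmt, err :=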
tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+			"name":       username,
+			"password":   password,
+			"expiration": expirationStr,
+		}))
+		if err != nil {
+			return "", "", err
+		}
+		defer stmt.Close()
+		if _, err := stmt.Exec(); err != nil {
+			return "", "", err
+		}
+	}
+
+	// Commit the transaction
+	if err := tx.Commit(); err != nil {
+		return "", "", err
+	}
+
+	return username, password, nil
+}
+
+func (p *PostgreSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+	// Grab the lock
+	p.Lock()
+	defer p.Unlock()
+
+	db, err := p.getConnection()
+	if err != nil {
+		return err
+	}
+
+	expirationStr, err := p.GenerateExpiration(expiration)
+	if err != nil {
+		return err
+	}
+
+	query := fmt.Sprintf(
+		"ALTER ROLE %s VALID UNTIL '%s';",
+		pq.QuoteIdentifier(username),
+		expirationStr)
+
+	stmt, err := db.Prepare(query)
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+	if _, err := stmt.Exec(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PostgreSQL) RevokeUser(statements dbplugin.Statements, username string) error {
+	// Grab the lock
+	p.Lock()
+	defer p.Unlock()
+
+	if statements.RevocationStatements == "" {
+		return p.defaultRevokeUser(username)
+	}
+
+	return p.customRevokeUser(username, statements.RevocationStatements)
+}
+
+func (p *PostgreSQL) customRevokeUser(username, revocationStmts string) error {
+	db, err := p.getConnection()
+	if err != nil {
+		return err
+	}
+
+	tx, err := db.Begin()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		tx.Rollback()
+	}()
+
+	for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
+		query = strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+
+		stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+			"name": username,
+		}))
+		if err != nil {
+			return err
+		}
+		defer stmt.Close()
+
+		if _, err := stmt.Exec(); err != nil {
+			return err
+		}
+	}
+
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PostgreSQL) defaultRevokeUser(username string) error {
+	db, err := p.getConnection()
+	if err != nil {
+		return err
+	}
+
+	// Check if the role exists
+	var exists bool
+	err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
+	if err != nil && err != sql.ErrNoRows {
+		return err
+	}
+
+	if !exists {
+		return nil
+	}
+
+	// Query for permissions; we need to revoke permissions before we can drop
+	// the role
+	// This isn't done in a transaction because even if we fail along the way,
+	// we want to remove as much access as possible
+	stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	rows, err := stmt.Query(username)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	const initialNumRevocations = 16
+	revocationStmts := make([]string, 0, initialNumRevocations)
+	for rows.Next() {
+		var schema string
+		err = rows.Scan(&schema)
+		if err != nil {
+			// keep going; remove as many permissions as possible right now
+			continue
+		}
+		revocationStmts = append(revocationStmts, fmt.Sprintf(
+			`REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
+			pq.QuoteIdentifier(schema),
+			pq.QuoteIdentifier(username)))
+
+		revocationStmts = append(revocationStmts, fmt.Sprintf(
+			`REVOKE USAGE ON SCHEMA %s FROM %s;`,
+			pq.QuoteIdentifier(schema),
+			pq.QuoteIdentifier(username)))
+	}
+
+	// for good measure, revoke all
privileges and usage on schema public + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE USAGE ON SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + // get the current database name so we can issue a REVOKE CONNECT for + // this username + var dbname sql.NullString + if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil { + return err + } + + if dbname.Valid { + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE CONNECT ON DATABASE %s FROM %s;`, + pq.QuoteIdentifier(dbname.String), + pq.QuoteIdentifier(username))) + } + + // again, here, we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revocationStmts { + stmt, err := db.Prepare(query) + if err != nil { + lastStmtError = err + continue + } + defer stmt.Close() + _, err = stmt.Exec() + if err != nil { + lastStmtError = err + } + } + + // can't drop if not all privileges are revoked + if rows.Err() != nil { + return fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err()) + } + if lastStmtError != nil { + return fmt.Errorf("could not perform all revocation statements: %s", lastStmtError) + } + + // Drop this user + stmt, err = db.Prepare(fmt.Sprintf( + `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username))) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + + return nil +} diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go new file mode 100644 index 000000000..79391dc56 --- /dev/null +++ b/plugins/database/postgresql/postgresql_test.go @@ -0,0 +1,313 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +var ( + testPostgresImagePull sync.Once +) + +func preparePostgresTestContainer(t *testing.T) (cleanup func(), retURL string) { + if os.Getenv("PG_URL") != "" { + return func() {}, os.Getenv("PG_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"}) + if err != nil { + t.Fatalf("Could not start local PostgreSQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + var db *sql.DB + db, err = sql.Open("postgres", retURL) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + t.Fatalf("Could not connect to PostgreSQL docker container: %s", err) + } + + return +} + +func TestPostgreSQL_Initialize(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer 
cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*PostgreSQL)
+
+	connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if !connProducer.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+
+	err = db.Close()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestPostgreSQL_CreateUser(t *testing.T) {
+	cleanup, connURL := preparePostgresTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*PostgreSQL)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Test with no configured Creation Statement
+	_, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute))
+	if err == nil {
+		t.Fatal("Expected error when no creation statement is provided")
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testPostgresRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	statements.CreationStatements = testPostgresReadOnlyRole
+	username, password, err = db.CreateUser(statements, "test", time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+}
+
+func TestPostgreSQL_RenewUser(t *testing.T) {
+	cleanup, connURL := preparePostgresTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*PostgreSQL)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testPostgresRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Sleep longer than the initial expiration time
+	time.Sleep(2 * time.Second)
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+}
+
+func TestPostgreSQL_RevokeUser(t *testing.T) {
+	cleanup, connURL := preparePostgresTestContainer(t)
+	defer cleanup()
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	dbRaw, _ := New()
+	db := dbRaw.(*PostgreSQL)
+	err := db.Initialize(connectionDetails, true)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	statements := dbplugin.Statements{
+		CreationStatements: testPostgresRole,
+	}
+
+	username, password, err := db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	// Test default revoke statements
+	err = db.RevokeUser(statements, username)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err == nil {
+		t.Fatal("Credentials were not revoked")
+	}
+
+	username, password, err = db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err = testCredsExist(t, connURL, username, password); err != nil {
+		t.Fatalf("Could not connect with new credentials: %s", err)
+	}
+
+	// Test custom revoke statements
+	statements.RevocationStatements = defaultPostgresRevocationSQL
+	err = db.RevokeUser(statements, username)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := testCredsExist(t, connURL, username, password); err == nil {
+		t.Fatal("Credentials were not revoked")
+	}
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+	// Log in with the new creds
+	connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1)
+	db, err := sql.Open("postgres", connURL)
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+	return db.Ping()
+}
+
+const testPostgresRole = `
+CREATE ROLE "{{name}}" WITH
+  LOGIN
+  PASSWORD '{{password}}'
+  VALID UNTIL '{{expiration}}';
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+`
+
+const testPostgresReadOnlyRole = `
+CREATE ROLE "{{name}}" WITH
+  LOGIN
+  PASSWORD '{{password}}'
+  VALID UNTIL '{{expiration}}';
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
+GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
+`
+
+const testPostgresBlockStatementRole = `
+DO $$
+BEGIN
+   IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+      CREATE ROLE "foo-role";
+      CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+      ALTER ROLE "foo-role" SET search_path = foo;
+      GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+      GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+   END IF;
+END
+$$
+
+CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
+GRANT "foo-role" TO "{{name}}";
+ALTER ROLE "{{name}}" SET search_path = foo;
+GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
+`
+
+var testPostgresBlockStatementRoleSlice = []string{
+	`
+DO $$
+BEGIN
+   IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+      CREATE ROLE "foo-role";
+      CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+      ALTER ROLE "foo-role" SET search_path = foo;
+      GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+      GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+      GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+   END IF;
+END
+$$
+`,
+	`CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
+	`GRANT "foo-role" TO "{{name}}";`,
+	`ALTER ROLE "{{name}}" SET search_path = foo;`,
+	`GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
+}
+
+const defaultPostgresRevocationSQL = `
+REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
+REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
+REVOKE USAGE ON SCHEMA public FROM
"{{name}}"; + +DROP ROLE IF EXISTS "{{name}}"; +` diff --git a/plugins/helper/database/connutil/cassandra.go b/plugins/helper/database/connutil/cassandra.go new file mode 100644 index 000000000..869c39e3b --- /dev/null +++ b/plugins/helper/database/connutil/cassandra.go @@ -0,0 +1,226 @@ +package connutil + +import ( + "crypto/tls" + "fmt" + "strings" + "sync" + "time" + + "github.com/mitchellh/mapstructure" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/helper/certutil" + "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/tlsutil" +) + +// CassandraConnectionProducer implements ConnectionProducer and provides an +// interface for cassandra databases to make connections. +type CassandraConnectionProducer struct { + Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"` + Username string `json:"username" structs:"username" mapstructure:"username"` + Password string `json:"password" structs:"password" mapstructure:"password"` + TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` + InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` + ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"` + ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` + TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"` + Consistency string `json:"consistency" structs:"consistency" mapstructure:"consistency"` + PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"` + PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"` + + connectTimeout time.Duration + certificate string + privateKey string + issuingCA string + + Initialized bool + Type string + session *gocql.Session + sync.Mutex +} + +func (c *CassandraConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error { + c.Lock() + defer c.Unlock() + + err := mapstructure.Decode(conf, c) + if err != nil { + return err + } + c.Initialized = true + + if c.ConnectTimeoutRaw == nil { + c.ConnectTimeoutRaw = "0s" + } + c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw) + if err != nil { + return fmt.Errorf("invalid connect_timeout: %s", err) + } + + switch { + case len(c.Hosts) == 0: + return fmt.Errorf("hosts cannot be empty") + case len(c.Username) == 0: + return fmt.Errorf("username cannot be empty") + case len(c.Password) == 0: + return fmt.Errorf("password cannot be empty") + } + + var certBundle *certutil.CertBundle + var parsedCertBundle *certutil.ParsedCertBundle + switch { + case len(c.PemJSON) != 0: + parsedCertBundle, err = certutil.ParsePKIJSON([]byte(c.PemJSON)) + if err != nil { + return fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err) + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return fmt.Errorf("Error marshaling PEM information: %s", err) + } + c.certificate = certBundle.Certificate + c.privateKey = certBundle.PrivateKey + c.issuingCA = certBundle.IssuingCA + c.TLS = true + + case len(c.PemBundle) != 0: + parsedCertBundle, err = certutil.ParsePEMBundle(c.PemBundle) + if err != nil { + return fmt.Errorf("Error parsing the given PEM information: %s", err) + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return fmt.Errorf("Error marshaling PEM information: %s", 
err)
+		}
+		c.certificate = certBundle.Certificate
+		c.privateKey = certBundle.PrivateKey
+		c.issuingCA = certBundle.IssuingCA
+		c.TLS = true
+	}
+
+	if verifyConnection {
+		if _, err := c.Connection(); err != nil {
+			return fmt.Errorf("error initializing connection: %s", err)
+		}
+	}
+	return nil
+}
+
+func (c *CassandraConnectionProducer) Connection() (interface{}, error) {
+	if !c.Initialized {
+		return nil, errNotInitialized
+	}
+
+	// If we already have a session, return it
+	if c.session != nil {
+		return c.session, nil
+	}
+
+	session, err := c.createSession()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the session in backend for reuse
+	c.session = session
+
+	return session, nil
+}
+
+func (c *CassandraConnectionProducer) Close() error {
+	// Grab the write lock
+	c.Lock()
+	defer c.Unlock()
+
+	if c.session != nil {
+		c.session.Close()
+	}
+
+	c.session = nil
+
+	return nil
+}
+
+func (c *CassandraConnectionProducer) createSession() (*gocql.Session, error) {
+	clusterConfig := gocql.NewCluster(strings.Split(c.Hosts, ",")...)
+	clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+		Username: c.Username,
+		Password: c.Password,
+	}
+
+	clusterConfig.ProtoVersion = c.ProtocolVersion
+	if clusterConfig.ProtoVersion == 0 {
+		clusterConfig.ProtoVersion = 2
+	}
+
+	clusterConfig.Timeout = c.connectTimeout
+	if c.TLS {
+		var tlsConfig *tls.Config
+		if len(c.certificate) > 0 || len(c.issuingCA) > 0 {
+			if len(c.certificate) > 0 && len(c.privateKey) == 0 {
+				return nil, fmt.Errorf("found certificate for TLS authentication but no private key")
+			}
+
+			certBundle := &certutil.CertBundle{}
+			if len(c.certificate) > 0 {
+				certBundle.Certificate = c.certificate
+				certBundle.PrivateKey = c.privateKey
+			}
+			if len(c.issuingCA) > 0 {
+				certBundle.IssuingCA = c.issuingCA
+			}
+
+			parsedCertBundle, err := certBundle.ToParsedCertBundle()
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse certificate bundle: %s", err)
+			}
+
+			tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
+			if err != nil || tlsConfig == nil {
+				return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err)
+			}
+			tlsConfig.InsecureSkipVerify = c.InsecureTLS
+
+			if c.TLSMinVersion != "" {
+				var ok bool
+				tlsConfig.MinVersion, ok = tlsutil.TLSLookup[c.TLSMinVersion]
+				if !ok {
+					return nil, fmt.Errorf("invalid 'tls_min_version' in config")
+				}
+			} else {
+				// MinVersion was not being set earlier. Reset it to
+				// zero to gracefully handle upgrades.
+				tlsConfig.MinVersion = 0
+			}
+		}
+
+		clusterConfig.SslOpts = &gocql.SslOptions{
+			Config: tlsConfig,
+		}
+	}
+
+	session, err := clusterConfig.CreateSession()
+	if err != nil {
+		return nil, fmt.Errorf("error creating session: %s", err)
+	}
+
+	// Set consistency
+	if c.Consistency != "" {
+		consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency)
+		if err != nil {
+			return nil, err
+		}
+
+		session.SetConsistency(consistencyValue)
+	}
+
+	// Verify the info
+	err = session.Query(`LIST USERS`).Exec()
+	if err != nil {
+		return nil, fmt.Errorf("error validating connection info: %s", err)
+	}
+
+	return session, nil
+}
diff --git a/plugins/helper/database/connutil/connutil.go b/plugins/helper/database/connutil/connutil.go
new file mode 100644
index 000000000..c43691c61
--- /dev/null
+++ b/plugins/helper/database/connutil/connutil.go
@@ -0,0 +1,21 @@
+package connutil
+
+import (
+	"errors"
+	"sync"
+)
+
+var (
+	errNotInitialized = errors.New("connection has not been initialized")
+)
+
+// ConnectionProducer can be used as an embedded interface in the Database
+// definition. It implements the methods dealing with individual database
+// connections and is used in all the builtin database types.
+type ConnectionProducer interface {
+	Close() error
+	Initialize(map[string]interface{}, bool) error
+	Connection() (interface{}, error)
+
+	sync.Locker
+}
diff --git a/plugins/helper/database/connutil/sql.go b/plugins/helper/database/connutil/sql.go
new file mode 100644
index 000000000..04269798f
--- /dev/null
+++ b/plugins/helper/database/connutil/sql.go
@@ -0,0 +1,136 @@
+package connutil
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	// Import sql drivers
+	_ "github.com/denisenkom/go-mssqldb"
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/hashicorp/vault/helper/parseutil"
+	_ "github.com/lib/pq"
+	"github.com/mitchellh/mapstructure"
+)
+
+// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases
+type SQLConnectionProducer struct {
+	ConnectionURL            string      `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+	MaxOpenConnections       int         `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
+	MaxIdleConnections       int         `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
+	MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" structs:"max_connection_lifetime" mapstructure:"max_connection_lifetime"`
+
+	Type                  string
+	maxConnectionLifetime time.Duration
+	Initialized           bool
+	db                    *sql.DB
+	sync.Mutex
+}
+
+func (c *SQLConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+	c.Lock()
+	defer c.Unlock()
+
+	err := mapstructure.Decode(conf, c)
+	if err != nil {
+		return err
+	}
+
+	if len(c.ConnectionURL) == 0 {
+		return fmt.Errorf("connection_url cannot be empty")
+	}
+
+	if c.MaxOpenConnections == 0 {
+		c.MaxOpenConnections = 2
+	}
+
+	if c.MaxIdleConnections == 0 {
+		c.MaxIdleConnections = c.MaxOpenConnections
+	}
+	if c.MaxIdleConnections > c.MaxOpenConnections {
+		c.MaxIdleConnections = c.MaxOpenConnections
+	}
+	if c.MaxConnectionLifetimeRaw == nil {
+		c.MaxConnectionLifetimeRaw = "0s"
+	}
+
+	c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw)
+	if err != nil {
+		return fmt.Errorf("invalid max_connection_lifetime: %s", err)
+	}
+
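+	// When verifyConnection is set, Initialize eagerly opens and pings the
+	// database so that configuration errors surface immediately rather than
+	// on the first credential request.
+	if verifyConnection {
+		if _, err := c.Connection(); err != nil {
+			return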
fmt.Errorf("error initalizing connection: %s", err) + } + + if err := c.db.Ping(); err != nil { + return fmt.Errorf("error initalizing connection: %s", err) + } + } + + c.Initialized = true + + return nil +} + +func (c *SQLConnectionProducer) Connection() (interface{}, error) { + // If we already have a DB, test it and return + if c.db != nil { + if err := c.db.Ping(); err == nil { + return c.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + c.db.Close() + } + + // For mssql backend, switch to sqlserver instead + dbType := c.Type + if c.Type == "mssql" { + dbType = "sqlserver" + } + + // Otherwise, attempt to make connection + conn := c.ConnectionURL + + // Ensure timezone is set to UTC for all the conenctions + if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { + if strings.Contains(conn, "?") { + conn += "&timezone=utc" + } else { + conn += "?timezone=utc" + } + } + + var err error + c.db, err = sql.Open(dbType, conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. + c.db.SetMaxOpenConns(c.MaxOpenConnections) + c.db.SetMaxIdleConns(c.MaxIdleConnections) + c.db.SetConnMaxLifetime(c.maxConnectionLifetime) + + return c.db, nil +} + +// Close attempts to close the connection +func (c *SQLConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.db != nil { + c.db.Close() + } + + c.db = nil + + return nil +} diff --git a/plugins/helper/database/credsutil/cassandra.go b/plugins/helper/database/credsutil/cassandra.go new file mode 100644 index 000000000..7ab5630b5 --- /dev/null +++ b/plugins/helper/database/credsutil/cassandra.go @@ -0,0 +1,37 @@ +package credsutil + +import ( + "fmt" + "strings" + "time" + + uuid "github.com/hashicorp/go-uuid" +) + +// CassandraCredentialsProducer implements CredentialsProducer and provides an +// interface for cassandra databases to generate user information. +type CassandraCredentialsProducer struct{} + +func (ccp *CassandraCredentialsProducer) GenerateUsername(displayName string) (string, error) { + userUUID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + username := fmt.Sprintf("vault_%s_%s_%d", displayName, userUUID, time.Now().Unix()) + username = strings.Replace(username, "-", "_", -1) + + return username, nil +} + +func (ccp *CassandraCredentialsProducer) GeneratePassword() (string, error) { + password, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + return password, nil +} + +func (ccp *CassandraCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) { + return "", nil +} diff --git a/plugins/helper/database/credsutil/credsutil.go b/plugins/helper/database/credsutil/credsutil.go new file mode 100644 index 000000000..bc35617ac --- /dev/null +++ b/plugins/helper/database/credsutil/credsutil.go @@ -0,0 +1,12 @@ +package credsutil + +import "time" + +// CredentialsProducer can be used as an embeded interface in the Database +// definition. It implements the methods for generating user information for a +// particular database type and is used in all the builtin database types. 
+type CredentialsProducer interface {
+	GenerateUsername(displayName string) (string, error)
+	GeneratePassword() (string, error)
+	GenerateExpiration(ttl time.Time) (string, error)
+}
diff --git a/plugins/helper/database/credsutil/sql.go b/plugins/helper/database/credsutil/sql.go
new file mode 100644
index 000000000..a7929ccb1
--- /dev/null
+++ b/plugins/helper/database/credsutil/sql.go
@@ -0,0 +1,43 @@
+package credsutil
+
+import (
+	"fmt"
+	"time"
+
+	uuid "github.com/hashicorp/go-uuid"
+)
+
+// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types.
+type SQLCredentialsProducer struct {
+	DisplayNameLen int
+	UsernameLen    int
+}
+
+func (scp *SQLCredentialsProducer) GenerateUsername(displayName string) (string, error) {
+	if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen {
+		displayName = displayName[:scp.DisplayNameLen]
+	}
+	userUUID, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", err
+	}
+	username := fmt.Sprintf("v-%s-%s", displayName, userUUID)
+	if scp.UsernameLen > 0 && len(username) > scp.UsernameLen {
+		username = username[:scp.UsernameLen]
+	}
+
+	return username, nil
+}
+
+func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
+	password, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", err
+	}
+
+	return password, nil
+}
+
+func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
+	return ttl.Format("2006-01-02 15:04:05-0700"), nil
+}
diff --git a/plugins/helper/database/dbutil/dbutil.go b/plugins/helper/database/dbutil/dbutil.go
new file mode 100644
index 000000000..e80273b7f
--- /dev/null
+++ b/plugins/helper/database/dbutil/dbutil.go
@@ -0,0 +1,20 @@
+package dbutil
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	ErrEmptyCreationStatement = errors.New("empty creation statements")
+)
+
+// QueryHelper templates a query: every {{key}} placeholder in tpl is replaced
+// with the corresponding value from data.
+func QueryHelper(tpl string, data map[string]string) string {
+	for k, v := range data {
+		tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+	}
+
+	return tpl
+}
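As a concrete illustration of the helper (values hypothetical), QueryHelper performs plain string substitution, so the statement templates used throughout these plugins render like this:

	rendered := dbutil.QueryHelper(
		`CREATE USER '{{name}}' IDENTIFIED BY '{{password}}';`,
		map[string]string{"name": "v-app-a1b2c3", "password": "s3cr3t"},
	)
	// rendered == "CREATE USER 'v-app-a1b2c3' IDENTIFIED BY 's3cr3t';"

diff --git a/plugins/serve.go b/plugins/serve.go
new file mode 100644
index 000000000..a40fc5b14
--- /dev/null
+++ b/plugins/serve.go
@@ -0,0 +1,31 @@
+package plugins
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/helper/pluginutil"
+)
+
+// Serve is used to start a plugin's RPC server. It takes an interface that must
+// implement a known plugin interface to vault and an optional api.TLSConfig for
+// use during the initial unwrap request to vault. The api config is particularly
+// useful when vault is set up to require client cert checking.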
+func Serve(plugin interface{}, tlsConfig *api.TLSConfig) { + tlsProvider := pluginutil.VaultPluginTLSProvider(tlsConfig) + + err := pluginutil.OptionallyEnableMlock() + if err != nil { + fmt.Println(err) + return + } + + switch p := plugin.(type) { + case dbplugin.Database: + dbplugin.Serve(p, tlsProvider) + default: + fmt.Println("Unsupported plugin type") + } + +} diff --git a/vault/core.go b/vault/core.go index 396a2bc16..f3ed06d38 100644 --- a/vault/core.go +++ b/vault/core.go @@ -10,6 +10,7 @@ import ( "net" "net/http" "net/url" + "path/filepath" "sync" "time" @@ -330,6 +331,14 @@ type Core struct { // uiEnabled indicates whether Vault Web UI is enabled or not uiEnabled bool + + // pluginDirectory is the location vault will look for plugin binaries + pluginDirectory string + + // pluginCatalog is used to manage plugin configurations + pluginCatalog *PluginCatalog + + enableMlock bool } // CoreConfig is used to parameterize a core @@ -374,6 +383,8 @@ type CoreConfig struct { EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"` + PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"` + ReloadFuncs *map[string][]ReloadFunc ReloadFuncsLock *sync.RWMutex } @@ -430,6 +441,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { clusterName: conf.ClusterName, clusterListenerShutdownCh: make(chan struct{}), clusterListenerShutdownSuccessCh: make(chan struct{}), + enableMlock: !conf.DisableMlock, } // Wrap the physical backend in a cache layer if enabled and not already wrapped @@ -453,8 +465,15 @@ func NewCore(conf *CoreConfig) (*Core, error) { } } - // Construct a new AES-GCM barrier var err error + if conf.PluginDirectory != "" { + c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory) + if err != nil { + return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %v", err) + } + } + + // Construct a new AES-GCM barrier c.barrier, err = NewAESGCMBarrier(c.physical) if err != nil { return nil, fmt.Errorf("barrier setup failed: %v", err) @@ -1280,6 +1299,10 @@ func (c *Core) postUnseal() (retErr error) { if err := c.setupAuditedHeadersConfig(); err != nil { return err } + if err := c.setupPluginCatalog(); err != nil { + return err + } + if c.ha != nil { if err := c.startClusterListener(); err != nil { return err diff --git a/vault/dynamic_system_view.go b/vault/dynamic_system_view.go index 30b6a760b..3844b46bf 100644 --- a/vault/dynamic_system_view.go +++ b/vault/dynamic_system_view.go @@ -1,9 +1,12 @@ package vault import ( + "fmt" "time" "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" ) @@ -87,3 +90,49 @@ func (d dynamicSystemView) ReplicationState() consts.ReplicationState { d.core.clusterParamsLock.RUnlock() return state } + +// ResponseWrapData wraps the given data in a cubbyhole and returns the +// token used to unwrap. 
+func (d dynamicSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+	req := &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "sys/wrapping/wrap",
+	}
+
+	resp := &logical.Response{
+		WrapInfo: &wrapping.ResponseWrapInfo{
+			TTL: ttl,
+		},
+		Data: data,
+	}
+
+	if jwt {
+		resp.WrapInfo.Format = "jwt"
+	}
+
+	_, err := d.core.wrapInCubbyhole(req, resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.WrapInfo, nil
+}
+
+// LookupPlugin looks for a plugin with the given name in the plugin catalog. It
+// returns a PluginRunner or an error if no plugin was found.
+func (d dynamicSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
+	r, err := d.core.pluginCatalog.Get(name)
+	if err != nil {
+		return nil, err
+	}
+	if r == nil {
+		return nil, fmt.Errorf("no plugin found with name: %s", name)
+	}
+
+	return r, nil
+}
+
+// MlockEnabled returns the configuration setting for enabling mlock on plugins.
+func (d dynamicSystemView) MlockEnabled() bool {
+	return d.core.enableMlock
+}
diff --git a/vault/logical_system.go b/vault/logical_system.go
index 5fdf3122e..4a7f28fb7 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/vault/helper/consts"
 	"github.com/hashicorp/vault/helper/parseutil"
+	"github.com/hashicorp/vault/helper/wrapping"
 	"github.com/hashicorp/vault/logical"
 	"github.com/hashicorp/vault/logical/framework"
 	"github.com/mitchellh/mapstructure"
@@ -62,6 +63,7 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
 			"replication/reindex",
 			"rotate",
 			"config/auditing/*",
+			"plugins/catalog/*",
 			"revoke-prefix/*",
 			"leases/revoke-prefix/*",
 			"leases/revoke-force/*",
@@ -736,6 +738,48 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
 			HelpSynopsis:    strings.TrimSpace(sysHelp["audited-headers"][0]),
 			HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
 		},
+		&framework.Path{
+			Pattern: "plugins/catalog/$",
+
+			Fields: map[string]*framework.FieldSchema{},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ListOperation: b.handlePluginCatalogList,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["plugin-catalog"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
+		},
+		&framework.Path{
+			Pattern: "plugins/catalog/(?P<name>.+)",
+
+			Fields: map[string]*framework.FieldSchema{
+				"name": &framework.FieldSchema{
+					Type:        framework.TypeString,
+					Description: "The name of the plugin",
+				},
+				"sha_256": &framework.FieldSchema{
+					Type: framework.TypeString,
+					Description: `The SHA256 sum of the executable used in the
+					command field. This should be hex-encoded.`,
+				},
+				"command": &framework.FieldSchema{
+					Type: framework.TypeString,
+					Description: `The command used to start the plugin.
The
+					executable defined in this command must exist in vault's
+					plugin directory.`,
+				},
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.UpdateOperation: b.handlePluginCatalogUpdate,
+				logical.DeleteOperation: b.handlePluginCatalogDelete,
+				logical.ReadOperation:   b.handlePluginCatalogRead,
+			},
+
+			HelpSynopsis:    strings.TrimSpace(sysHelp["plugin-catalog"][0]),
+			HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
+		},
 	},
 }
@@ -768,6 +812,77 @@ func (b *SystemBackend) invalidate(key string) {
 	}
 }
 
+func (b *SystemBackend) handlePluginCatalogList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	plugins, err := b.Core.pluginCatalog.List()
+	if err != nil {
+		return nil, err
+	}
+
+	return logical.ListResponse(plugins), nil
+}
+
+func (b *SystemBackend) handlePluginCatalogUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	pluginName := d.Get("name").(string)
+	if pluginName == "" {
+		return logical.ErrorResponse("missing plugin name"), nil
+	}
+
+	sha256 := d.Get("sha_256").(string)
+	if sha256 == "" {
+		return logical.ErrorResponse("missing SHA-256 value"), nil
+	}
+
+	command := d.Get("command").(string)
+	if command == "" {
+		return logical.ErrorResponse("missing command value"), nil
+	}
+
+	sha256Bytes, err := hex.DecodeString(sha256)
+	if err != nil {
+		return logical.ErrorResponse("could not decode SHA-256 value from hex"), err
+	}
+
+	err = b.Core.pluginCatalog.Set(pluginName, command, sha256Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *SystemBackend) handlePluginCatalogRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	pluginName := d.Get("name").(string)
+	if pluginName == "" {
+		return logical.ErrorResponse("missing plugin name"), nil
+	}
+	plugin, err := b.Core.pluginCatalog.Get(pluginName)
+	if err != nil {
+		return nil, err
+	}
+	if plugin == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"plugin": plugin,
+		},
+	}, nil
+}
+
+func (b *SystemBackend) handlePluginCatalogDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	pluginName := d.Get("name").(string)
+	if pluginName == "" {
+		return logical.ErrorResponse("missing plugin name"), nil
+	}
+	err := b.Core.pluginCatalog.Delete(pluginName)
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
 // handleAuditedHeaderUpdate creates or overwrites a header entry
 func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
 	header := d.Get("header").(string)
@@ -2074,7 +2189,7 @@ func (b *SystemBackend) handleWrappingRewrap(
 		Data: map[string]interface{}{
 			"response": response,
 		},
-		WrapInfo: &logical.ResponseWrapInfo{
+		WrapInfo: &wrapping.ResponseWrapInfo{
 			TTL: time.Duration(creationTTL),
 		},
 	}, nil
@@ -2524,7 +2639,23 @@ This path responds to the following HTTP methods.
 		"Lists the headers configured to be audited.",
 		`Returns a list of headers that have been configured to be audited.`,
 	},
+	"plugins/catalog": {
+		`Configures the plugins known to vault`,
+		`
+This path responds to the following HTTP methods.
+    LIST /
+        Returns a list of names of configured plugins.
+
+    GET /<name>
+        Retrieve the metadata for the named plugin.
+
+    PUT /<name>
+        Add or update plugin.
+
+    DELETE /<name>
+        Delete the plugin with the given name.
+`,
+	},
 	"leases": {
 		`View or list lease metadata.`,
 		`
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index 4f3f70fc4..0a84c04af 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -2,6 +2,11 @@ package vault
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
 	"reflect"
 	"strings"
 	"testing"
@@ -9,6 +14,8 @@ import (
 	"github.com/fatih/structs"
 	"github.com/hashicorp/vault/audit"
+	"github.com/hashicorp/vault/helper/builtinplugins"
+	"github.com/hashicorp/vault/helper/pluginutil"
 	"github.com/hashicorp/vault/helper/salt"
 	"github.com/hashicorp/vault/logical"
 	"github.com/mitchellh/mapstructure"
@@ -25,6 +32,7 @@ func TestSystemBackend_RootPaths(t *testing.T) {
 		"replication/reindex",
 		"rotate",
 		"config/auditing/*",
+		"plugins/catalog/*",
 		"revoke-prefix/*",
 		"leases/revoke-prefix/*",
 		"leases/revoke-force/*",
@@ -1543,3 +1551,92 @@ func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) {
 	}
 	return c, b, root
 }
+
+func TestSystemBackend_PluginCatalog_CRUD(t *testing.T) {
+	c, b, _ := testCoreSystemBackend(t)
+	// Bootstrap the pluginCatalog
+	sym, err := filepath.EvalSymlinks(os.TempDir())
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+	c.pluginCatalog.directory = sym
+
+	req := logical.TestRequest(t, logical.ListOperation, "plugins/catalog/")
+	resp, err := b.HandleRequest(req)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(resp.Data["keys"].([]string)) != len(builtinplugins.Keys()) {
+		t.Fatalf("Wrong number of plugins, got %d, expected %d", len(resp.Data["keys"].([]string)), len(builtinplugins.Keys()))
+	}
+
+	req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/mysql-database-plugin")
+	resp, err = b.HandleRequest(req)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	expectedBuiltin := &pluginutil.PluginRunner{
+		Name:    "mysql-database-plugin",
+		Builtin: true,
+	}
+	expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin")
+
+	p := resp.Data["plugin"].(*pluginutil.PluginRunner)
+	if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) {
+		t.Fatal("expected BuiltinFactory did not match actual")
+	}
+
+	expectedBuiltin.BuiltinFactory = nil
+	p.BuiltinFactory = nil
+	if !reflect.DeepEqual(p, expectedBuiltin) {
+		t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", resp.Data["plugin"].(*pluginutil.PluginRunner), expectedBuiltin)
+	}
+
+	// Set a plugin
+	file, err := ioutil.TempFile(os.TempDir(), "temp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer file.Close()
+
+	command := fmt.Sprintf("%s --test", filepath.Base(file.Name()))
+	req = logical.TestRequest(t, logical.UpdateOperation, "plugins/catalog/test-plugin")
+	req.Data["sha_256"] = hex.EncodeToString([]byte{'1'})
+	req.Data["command"] = command
+	resp, err = b.HandleRequest(req)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin")
+	resp, err = b.HandleRequest(req)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	expected := &pluginutil.PluginRunner{
+		Name:    "test-plugin",
+		Command: filepath.Join(sym, filepath.Base(file.Name())),
+		Args:    []string{"--test"},
+		Sha256:  []byte{'1'},
+		Builtin: false,
+	}
+	if !reflect.DeepEqual(resp.Data["plugin"].(*pluginutil.PluginRunner), expected) {
+		t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", resp.Data["plugin"].(*pluginutil.PluginRunner), expected)
+	}
+
+	// Delete plugin
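+	// After deletion, reads of the entry should return a nil response: the
+	// external "test-plugin" is gone and no builtin exists with that name.
+	req = logical.TestRequest(t,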
logical.DeleteOperation, "plugins/catalog/test-plugin") + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin") + resp, err = b.HandleRequest(req) + if resp != nil || err != nil { + t.Fatalf("expected nil response, plugin not deleted correctly got resp: %v, err: %v", resp, err) + } +} diff --git a/vault/plugin_catalog.go b/vault/plugin_catalog.go new file mode 100644 index 000000000..79474e601 --- /dev/null +++ b/vault/plugin_catalog.go @@ -0,0 +1,176 @@ +package vault + +import ( + "encoding/json" + "errors" + "fmt" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" +) + +var ( + pluginCatalogPath = "core/plugin-catalog/" + ErrDirectoryNotConfigured = errors.New("could not set plugin, plugin directory is not configured") +) + +// PluginCatalog keeps a record of plugins known to vault. External plugins need +// to be registered to the catalog before they can be used in backends. Builtin +// plugins are automatically detected and included in the catalog. +type PluginCatalog struct { + catalogView *BarrierView + directory string + + lock sync.RWMutex +} + +func (c *Core) setupPluginCatalog() error { + c.pluginCatalog = &PluginCatalog{ + catalogView: NewBarrierView(c.barrier, pluginCatalogPath), + directory: c.pluginDirectory, + } + + return nil +} + +// Get retrieves a plugin with the specified name from the catalog. It first +// looks for external plugins with this name and then looks for builtin plugins. +// It returns a PluginRunner or an error if no plugin was found. +func (c *PluginCatalog) Get(name string) (*pluginutil.PluginRunner, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + // If the directory isn't set only look for builtin plugins. + if c.directory != "" { + // Look for external plugins in the barrier + out, err := c.catalogView.Get(name) + if err != nil { + return nil, fmt.Errorf("failed to retrieve plugin \"%s\": %v", name, err) + } + if out != nil { + entry := new(pluginutil.PluginRunner) + if err := jsonutil.DecodeJSON(out.Value, entry); err != nil { + return nil, fmt.Errorf("failed to decode plugin entry: %v", err) + } + + // prepend the plugin directory to the command + entry.Command = filepath.Join(c.directory, entry.Command) + + return entry, nil + } + } + // Look for builtin plugins + if factory, ok := builtinplugins.Get(name); ok { + return &pluginutil.PluginRunner{ + Name: name, + Builtin: true, + BuiltinFactory: factory, + }, nil + } + + return nil, nil +} + +// Set registers a new external plugin with the catalog, or updates an existing +// external plugin. It takes the name, command and SHA256 of the plugin. +func (c *PluginCatalog) Set(name, command string, sha256 []byte) error { + if c.directory == "" { + return ErrDirectoryNotConfigured + } + + c.lock.Lock() + defer c.lock.Unlock() + + parts := strings.Split(command, " ") + + // Best effort check to make sure the command isn't breaking out of the + // configured plugin directory. 
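+	// EvalSymlinks plus the parent-directory comparison below rejects commands
+	// that resolve outside the plugin directory (e.g. "../../bin/sh" or a
+	// symlink pointing elsewhere).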
+	commandFull := filepath.Join(c.directory, parts[0])
+	sym, err := filepath.EvalSymlinks(commandFull)
+	if err != nil {
+		return fmt.Errorf("error while validating the command path: %v", err)
+	}
+	symAbs, err := filepath.Abs(filepath.Dir(sym))
+	if err != nil {
+		return fmt.Errorf("error while validating the command path: %v", err)
+	}
+
+	if symAbs != c.directory {
+		return errors.New("cannot execute files outside of the configured plugin directory")
+	}
+
+	entry := &pluginutil.PluginRunner{
+		Name:    name,
+		Command: parts[0],
+		Args:    parts[1:],
+		Sha256:  sha256,
+		Builtin: false,
+	}
+
+	buf, err := json.Marshal(entry)
+	if err != nil {
+		return fmt.Errorf("failed to encode plugin entry: %v", err)
+	}
+
+	logicalEntry := logical.StorageEntry{
+		Key:   name,
+		Value: buf,
+	}
+	if err := c.catalogView.Put(&logicalEntry); err != nil {
+		return fmt.Errorf("failed to persist plugin entry: %v", err)
+	}
+	return nil
+}
+
+// Delete is used to remove an external plugin from the catalog. Builtin plugins
+// cannot be deleted.
+func (c *PluginCatalog) Delete(name string) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	return c.catalogView.Delete(name)
+}
+
+// List returns a list of all the known plugin names. If an external and builtin
+// plugin share the same name, only one instance of the name will be returned.
+func (c *PluginCatalog) List() ([]string, error) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	// Collect keys for external plugins in the barrier.
+	keys, err := logical.CollectKeys(c.catalogView)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the keys for builtin plugins
+	builtinKeys := builtinplugins.Keys()
+
+	// Use a map to unique the two lists
+	mapKeys := make(map[string]bool)
+
+	for _, plugin := range keys {
+		mapKeys[plugin] = true
+	}
+
+	for _, plugin := range builtinKeys {
+		mapKeys[plugin] = true
+	}
+
+	retList := make([]string, len(mapKeys))
+	i := 0
+	for k := range mapKeys {
+		retList[i] = k
+		i++
+	}
+	// sort for consistent ordering of builtin plugins
+	sort.Strings(retList)
+
+	return retList, nil
+}
diff --git a/vault/plugin_catalog_test.go b/vault/plugin_catalog_test.go
new file mode 100644
index 000000000..6cfacda7e
--- /dev/null
+++ b/vault/plugin_catalog_test.go
@@ -0,0 +1,176 @@
+package vault
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"testing"
+
+	"github.com/hashicorp/vault/helper/builtinplugins"
+	"github.com/hashicorp/vault/helper/pluginutil"
+)
+
+func TestPluginCatalog_CRUD(t *testing.T) {
+	core, _, _ := TestCoreUnsealed(t)
+
+	sym, err := filepath.EvalSymlinks(os.TempDir())
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+	core.pluginCatalog.directory = sym
+
+	// Get builtin plugin
+	p, err := core.pluginCatalog.Get("mysql-database-plugin")
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	expectedBuiltin := &pluginutil.PluginRunner{
+		Name:    "mysql-database-plugin",
+		Builtin: true,
+	}
+	expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin")
+
+	if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) {
+		t.Fatal("expected BuiltinFactory did not match actual")
+	}
+	expectedBuiltin.BuiltinFactory = nil
+	p.BuiltinFactory = nil
+	if !reflect.DeepEqual(p, expectedBuiltin) {
+		t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin)
+	}
+
+	// Set a plugin, test overwriting a builtin plugin
+	file, err := ioutil.TempFile(os.TempDir(), "temp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer file.Close()
+
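+	// Registering an external plugin under a builtin's name shadows the
+	// builtin; deleting the entry later makes the builtin visible again.
+	command :=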
fmt.Sprintf("%s --test", filepath.Base(file.Name())) + err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // Get the plugin + p, err = core.pluginCatalog.Get("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expected := &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Command: filepath.Join(sym, filepath.Base(file.Name())), + Args: []string{"--test"}, + Sha256: []byte{'1'}, + Builtin: false, + } + + if !reflect.DeepEqual(p, expected) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expected) + } + + // Delete the plugin + err = core.pluginCatalog.Delete("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + // Get builtin plugin + p, err = core.pluginCatalog.Get("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expectedBuiltin = &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Builtin: true, + } + expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin") + + if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) { + t.Fatal("expected BuiltinFactory did not match actual") + } + expectedBuiltin.BuiltinFactory = nil + p.BuiltinFactory = nil + if !reflect.DeepEqual(p, expectedBuiltin) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin) + } + +} + +func TestPluginCatalog_List(t *testing.T) { + core, _, _ := TestCoreUnsealed(t) + + sym, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + t.Fatalf("error: %v", err) + } + core.pluginCatalog.directory = sym + + // Get builtin plugins and sort them + builtinKeys := builtinplugins.Keys() + sort.Strings(builtinKeys) + + // List only builtin plugins + plugins, err := core.pluginCatalog.List() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(plugins) != len(builtinKeys) { + t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys), len(plugins)) + } + + for i, p := range builtinKeys { + if !reflect.DeepEqual(plugins[i], p) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i], p) + } + } + + // Set a plugin, test overwriting a builtin plugin + file, err := ioutil.TempFile(os.TempDir(), "temp") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + command := fmt.Sprintf("%s --test", filepath.Base(file.Name())) + err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // Set another plugin + err = core.pluginCatalog.Set("aaaaaaa", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // List the plugins + plugins, err = core.pluginCatalog.List() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(plugins) != len(builtinKeys)+1 { + t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys)+1, len(plugins)) + } + + // verify the first plugin is the one we just created. 
+	if !reflect.DeepEqual(plugins[0], "aaaaaaa") {
+		t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[0], "aaaaaaa")
+	}
+
+	// verify the builtin plugins are correct
+	for i, p := range builtinKeys {
+		if !reflect.DeepEqual(plugins[i+1], p) {
+			t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i+1], p)
+		}
+	}
+
+}
diff --git a/vault/request_handling.go b/vault/request_handling.go
index ad37b5aee..1326ef518 100644
--- a/vault/request_handling.go
+++ b/vault/request_handling.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/vault/helper/jsonutil"
 	"github.com/hashicorp/vault/helper/policyutil"
 	"github.com/hashicorp/vault/helper/strutil"
+	"github.com/hashicorp/vault/helper/wrapping"
 	"github.com/hashicorp/vault/logical"
 )
 
@@ -216,7 +217,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
 	}
 	if wrapTTL > 0 {
-		resp.WrapInfo = &logical.ResponseWrapInfo{
+		resp.WrapInfo = &wrapping.ResponseWrapInfo{
 			TTL:    wrapTTL,
 			Format: wrapFormat,
 		}
@@ -361,7 +362,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
 	}
 	if wrapTTL > 0 {
-		resp.WrapInfo = &logical.ResponseWrapInfo{
+		resp.WrapInfo = &wrapping.ResponseWrapInfo{
 			TTL:    wrapTTL,
 			Format: wrapFormat,
 		}
diff --git a/vault/testing.go b/vault/testing.go
index b567fe75e..a8c1f16bd 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -8,9 +8,12 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
+	"io"
 	"net"
 	"net/http"
+	"os"
 	"os/exec"
+	"path/filepath"
 	"testing"
 	"time"
 
@@ -293,6 +296,45 @@ func TestKeyCopy(key []byte) []byte {
 	return result
 }
 
+func TestDynamicSystemView(c *Core) *dynamicSystemView {
+	me := &MountEntry{
+		Config: MountConfig{
+			DefaultLeaseTTL: 24 * time.Hour,
+			MaxLeaseTTL:     2 * 24 * time.Hour,
+		},
+	}
+
+	return &dynamicSystemView{c, me}
+}
+
+func TestAddTestPlugin(t testing.TB, c *Core, name, testFunc string) {
+	file, err := os.Open(os.Args[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer file.Close()
+
+	hash := sha256.New()
+
+	_, err = io.Copy(hash, file)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sum := hash.Sum(nil)
+	c.pluginCatalog.directory, err = filepath.EvalSymlinks(os.Args[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	c.pluginCatalog.directory = filepath.Dir(c.pluginCatalog.directory)
+
+	command := fmt.Sprintf("%s --test.run=%s", filepath.Base(os.Args[0]), testFunc)
+	err = c.pluginCatalog.Set(name, command, sum)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 var testLogicalBackends = map[string]logical.Factory{}
 
 // Starts the test server which responds to SSH authentication.
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644
index 000000000..82b4de97c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. 
“Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. 
for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 000000000..2058cfb68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,161 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 3 years. While initially +created for [Packer](https://www.packer.io), it has since been used by +[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), +with plans to also use it for [Nomad](https://www.nomadproject.io) and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. 
This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc`). A single connection is made between +any plugin and the host process, and we use a +[connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Theoretically you could also use another language as long as it can + communicate the Go `net/rpc` protocol but this hasn't yet been tried. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. More security features are planned (see the coming soon section + below). + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over an `*rpc.Client` (from the standard `net/rpc` + package) for every function call. Likewise, implement the RPC server + struct this communicates to which is then communicating to a real, + concrete implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. 
As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Cryptographically Secure Plugins.** We'll implement signing plugins +and loading signed plugins in order to allow Vault to make use of multi-process +in a secure way. + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go still doesn't support dynamic library loading, but they do intend to. +Since 2012, our plugin system has stabilized from millions of users using it, +and has many benefits we've come to value greatly. + +For example, we intend to use this plugin system in +[Vault](https://www.vaultproject.io), and dynamic library loading will +simply never be acceptable in Vault for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll likely continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. + diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 000000000..b69d41b28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,666 @@ +package plugin + +import ( + "bufio" + "crypto/subtle" + "crypto/tls" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. 
+ ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + doneLogging chan struct{} + l sync.Mutex + address net.Addr + process *os.Process + client *RPCClient +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + Plugins map[string]Plugin + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. + SyncStdout io.Writer + SyncStderr io.Writer +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. 
You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...") + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + c = &Client{config: config} + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns an RPC client for the plugin. +// +// Subsequent calls to this will return the same RPC client. 
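+//
+// An illustrative host-side sequence (handshake, pluginMap, and the
+// plugin name are hypothetical placeholders):
+//
+//	client := plugin.NewClient(&plugin.ClientConfig{
+//		HandshakeConfig: handshake,
+//		Plugins:         pluginMap,
+//		Cmd:             exec.Command("./my-plugin"),
+//	})
+//	defer client.Kill()
+//
+//	rpcClient, err := client.Client()
+//	if err != nil {
+//		return err
+//	}
+//	raw, err := rpcClient.Dispense("my-plugin")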
+func (c *Client) Client() (*RPCClient, error) { + addr, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + c.client, err = NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = c.client.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + c.client.Close() + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + doneCh := c.doneLogging + c.l.Unlock() + + // If there is no process, we never started anything. Nothing to kill. + if process == nil { + return + } + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + log.Printf( + "[WARN] plugin: error closing client during Kill: %s", err) + } + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-doneCh: + return + case <-time.After(250 * time.Millisecond): + } + } + + // If graceful exiting failed, just kill it + process.Kill() + + // Wait for the client to finish logging so we have a complete log + <-doneCh +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. 
We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stock here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + // Create the logging channel for when we kill + c.doneLogging = make(chan struct{}) + + if c.config.Reattach != nil { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. + conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Goroutine to mark exit status + go func(pid int) { + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + log.Printf("[DEBUG] plugin: reattached plugin process exited\n") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + + // Close the logging channel since that doesn't work on reattach + close(c.doneLogging) + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + + return c.address, nil + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + } + + stdout_r, stdout_w := io.Pipe() + stderr_r, stderr_w := io.Pipe() + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + cmd.Stderr = stderr_w + cmd.Stdout = stdout_w + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Start goroutine to wait for process to exit + exitCh := make(chan struct{}) + go func() { + // Make sure we close the write end of our stderr/stdout so + // that the readers send EOF properly. + defer stderr_w.Close() + defer stdout_w.Close() + + // Wait for the command to end. 
+ cmd.Wait() + + // Log and make sure to flush the logs write away + log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) + os.Stderr.Sync() + + // Mark that we exited + close(exitCh) + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start goroutine that logs the stderr + go c.logStderr(stderr_r) + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan []byte) + go func() { + defer close(linesCh) + + buf := bufio.NewReader(stdout_r) + for { + line, err := buf.ReadBytes('\n') + if line != nil { + linesCh <- line + } + + if err == io.EOF { + return + } + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is an io.Pipe + defer func() { + go func() { + for _ = range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-exitCh: + err = errors.New("plugin exited before we could connect") + case lineBytes := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line := strings.TrimSpace(string(lineBytes)) + parts := strings.SplitN(line, "|", 4) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Ours: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Parse the protocol version + var protocol int64 + protocol, err = strconv.ParseInt(parts[1], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing protocol version: %s", err) + return + } + + // Test the API version + if uint(protocol) != c.config.ProtocolVersion { + err = fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) + return + } + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + } + + c.address = addr + return +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. 
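+//
+// A sketch of the intended round-trip (variable names are illustrative):
+//
+//	rc := client.ReattachConfig()
+//	// ... hand rc off, e.g. across a host-process upgrade ...
+//	client = plugin.NewClient(&plugin.ClientConfig{
+//		HandshakeConfig: handshake,
+//		Plugins:         pluginMap,
+//		Reattach:        rc,
+//	})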
+func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +func (c *Client) logStderr(r io.Reader) { + bufR := bufio.NewReader(r) + for { + line, err := bufR.ReadString('\n') + if line != "" { + c.config.Stderr.Write([]byte(line)) + + line = strings.TrimRightFunc(line, unicode.IsSpace) + log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + } + + if err == io.EOF { + break + } + } + + // Flag that we've completed logging for others + close(c.doneLogging) +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 000000000..d22c566ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 000000000..22a7baa6a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 000000000..01c45ad7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. 
+// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
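+//
+// For reference, a typical exchange between the two sides looks like the
+// following sketch (how the id travels between them is left to the
+// plugin's own RPC methods):
+//
+//	// one side: reserve an ID and serve an implementation on it
+//	id := broker.NextId()
+//	go broker.AcceptAndServe(id, impl)
+//
+//	// other side: dial the same ID to reach that implementation
+//	conn, err := broker.Dial(id)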
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 000000000..37c8fd653 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,25 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "net/rpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 000000000..88c999a58 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 000000000..70ba546bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. 
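+//
+// For example, _pidAlive(os.Getpid()) always reports true for the calling
+// process, since a process can always signal itself.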
+func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 000000000..9f7b01809 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 000000000..29f9bf063 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,123 @@ +package plugin + +import ( + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. 
+ var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 000000000..3984dc891 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,185 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// Accept accepts connections on a listener and serves requests for +// each incoming connection. Accept blocks; the caller typically invokes +// it in a go statement. +func (s *RPCServer) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+	// First create the yamux server to wrap this connection
+	mux, err := yamux.Server(conn, nil)
+	if err != nil {
+		conn.Close()
+		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+		return
+	}
+
+	// Accept the control connection
+	control, err := mux.Accept()
+	if err != nil {
+		mux.Close()
+		if err != io.EOF {
+			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+		}
+
+		return
+	}
+
+	// Connect the stdstreams (out, err)
+	stdstream := make([]net.Conn, 2)
+	for i, _ := range stdstream {
+		stdstream[i], err = mux.Accept()
+		if err != nil {
+			mux.Close()
+			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+			return
+		}
+	}
+
+	// Copy std streams out to the proper place
+	go copyStream("stdout", stdstream[0], s.Stdout)
+	go copyStream("stderr", stdstream[1], s.Stderr)
+
+	// Create the broker and start it up
+	broker := newMuxBroker(mux)
+	go broker.Run()
+
+	// Use the control connection to build the dispenser and serve the
+	// connection.
+	server := rpc.NewServer()
+	server.RegisterName("Control", &controlServer{
+		server: s,
+	})
+	server.RegisterName("Dispenser", &dispenseServer{
+		broker:  broker,
+		plugins: s.Plugins,
+	})
+	server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.DoneCh != nil {
+		close(s.DoneCh)
+		s.DoneCh = nil
+	}
+}
+
+// controlServer serves the control requests for an RPCServer, such as a
+// graceful shutdown request from the client.
+type controlServer struct {
+	server *RPCServer
+}
+
+func (c *controlServer) Quit(
+	null bool, response *struct{}) error {
+	// End the server
+	c.server.done()
+
+	// Always return true
+	*response = struct{}{}
+
+	return nil
+}
+
+// dispenseServer dispenses various interface implementations for plugins.
+type dispenseServer struct {
+	broker  *MuxBroker
+	plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+	name string, response *uint32) error {
+	// Find the function to create this implementation
+	p, ok := d.plugins[name]
+	if !ok {
+		return fmt.Errorf("unknown plugin type: %s", name)
+	}
+
+	// Create the implementation first so we know if there is an error.
+	impl, err := p.Server(d.broker)
+	if err != nil {
+		// We turn the error into an errors error so that it works across RPC
+		return errors.New(err.Error())
+	}
+
+	// Reserve an ID for our implementation
+	id := d.broker.NextId()
+	*response = id
+
+	// Run the rest in a goroutine since it can only happen once this RPC
+	// call returns. We wait for a connection for the plugin implementation
+	// and serve it.
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 000000000..782a4e119 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,235 @@ +package plugin + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "strconv" + "sync/atomic" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // Plugins are the plugins that are served. + Plugins map[string]Plugin + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to the log. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. 
+ stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + log.Printf("[ERR] plugin: plugin init: %s", err) + return + } + + if opts.TLSProvider != nil { + tlsConfig, err := opts.TLSProvider() + if err != nil { + log.Printf("[ERR] plugin: plugin tls init: %s", err) + return + } + listener = tls.NewListener(listener, tlsConfig) + } + defer listener.Close() + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Create the RPC server to dispense + server := &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + // Output the address and service name to stdout so that core can bring it up. + log.Printf("[DEBUG] plugin: plugin address: %s %s\n", + listener.Addr().Network(), listener.Addr().String()) + fmt.Printf("%d|%d|%s|%s\n", + CoreProtocolVersion, + opts.ProtocolVersion, + listener.Addr().Network(), + listener.Addr().String()) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + log.Printf( + "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.", + newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Serve + go server.Accept(listener) + + // Wait for the graceful exit + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. 
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 000000000..033079ea0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 000000000..1d547aaaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 000000000..9086a1b45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "bytes" + "net" + "net/rpc" + "testing" +) + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t *testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. 
+func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
+	clientConn, serverConn := TestConn(t)
+
+	server := rpc.NewServer()
+	go server.ServeConn(serverConn)
+
+	client := rpc.NewClient(clientConn)
+	return client, server
+}
+
+// TestPluginRPCConn returns a plugin RPC client and server that are connected
+// together and configured.
+func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
+	// Create two net.Conns we can use to shuttle our control connection
+	clientConn, serverConn := TestConn(t)
+
+	// Start up the server
+	server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
+	go server.ServeConn(serverConn)
+
+	// Connect the client to the server
+	client, err := NewRPCClient(clientConn, ps)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return client, server
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 9e769960d..ddcd78a8b 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -846,6 +846,12 @@
 			"revision": "ed905158d87462226a13fe39ddf685ea65f1c11f",
 			"revisionTime": "2016-12-16T18:43:04Z"
 		},
+		{
+			"checksumSHA1": "FOLPOFo4xuUaErsL99EC8azEUjw=",
+			"path": "github.com/hashicorp/go-plugin",
+			"revision": "b6691c5cfe7f0ec984114b056889cc90e51e38d0",
+			"revisionTime": "2017-04-12T21:16:38Z"
+		},
 		{
 			"checksumSHA1": "ErJHGU6AVPZM9yoY/xV11TwSjQs=",
 			"path": "github.com/hashicorp/go-retryablehttp",
diff --git a/website/source/api/secret/databases/cassandra.html.md b/website/source/api/secret/databases/cassandra.html.md
new file mode 100644
index 000000000..5e2b5a836
--- /dev/null
+++ b/website/source/api/secret/databases/cassandra.html.md
@@ -0,0 +1,96 @@
+---
+layout: "api"
+page_title: "Cassandra Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-cassandra"
+description: |-
+  The Cassandra plugin for Vault's Database backend generates database credentials to access Cassandra servers.
+---
+
+# Cassandra Database Plugin HTTP API
+
+The Cassandra Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the Cassandra database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+- `hosts` `(string: <required>)` – Specifies a set of comma-separated Cassandra
+  hosts to connect to.
+
+- `username` `(string: <required>)` – Specifies the username to use for
+  superuser access.
+
+- `password` `(string: <required>)` – Specifies the password corresponding to
+  the given username.
+
+- `tls` `(bool: true)` – Specifies whether to use TLS when connecting to
+  Cassandra.
+
+- `insecure_tls` `(bool: false)` – Specifies whether to skip verification of the
+  server certificate when using TLS.
+
+- `pem_bundle` `(string: "")` – Specifies concatenated PEM blocks containing a
+  certificate and private key; a certificate, private key, and issuing CA
+  certificate; or just a CA certificate.
+
+- `pem_json` `(string: "")` – Specifies JSON containing a certificate and
+  private key; a certificate, private key, and issuing CA certificate; or just a
+  CA certificate.
+  For convenience, this format is the same as the output of the
+  `issue` command from the `pki` backend; see
+  [the pki documentation](/docs/secrets/pki/index.html).
+
+- `protocol_version` `(int: 2)` – Specifies the CQL protocol version to use.
+
+- `connect_timeout` `(string: "5s")` – Specifies the connection timeout to use.
+
+TLS works as follows:
+
+- If `tls` is set to true, the connection will use TLS; this happens
+  automatically if `pem_bundle`, `pem_json`, or `insecure_tls` is set
+
+- If `insecure_tls` is set to true, the connection will not perform verification
+  of the server certificate; this also sets `tls` to true
+
+- If only `issuing_ca` is set in `pem_json`, or the only certificate in
+  `pem_bundle` is a CA certificate, the given CA certificate will be used for
+  server certificate verification; otherwise the system CA certificates will be
+  used
+
+- If `certificate` and `private_key` are set in `pem_bundle` or `pem_json`,
+  client auth will be turned on for the connection
+
+`pem_bundle` should be a PEM-concatenated bundle of a private key + client
+certificate, an issuing CA certificate, or both. `pem_json` should contain the
+same information; for convenience, the JSON format is the same as that output by
+the issue command from the PKI backend.
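+
+For example, a `pem_json` value might look like the following (abbreviated;
+the field names shown here mirror the `pki` backend's `issue` output and are
+given for illustration only):
+
+```json
+{
+  "certificate": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
+  "private_key": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----",
+  "issuing_ca": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"
+}
+```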
+
+### Sample Payload
+
+```json
+{
+  "plugin_name": "cassandra-database-plugin",
+  "allowed_roles": "readonly",
+  "hosts": "cassandra1.local",
+  "username": "user",
+  "password": "pass"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/config/cassandra
+```
diff --git a/website/source/api/secret/databases/index.html.md b/website/source/api/secret/databases/index.html.md
new file mode 100644
index 000000000..d43e49789
--- /dev/null
+++ b/website/source/api/secret/databases/index.html.md
@@ -0,0 +1,344 @@
+---
+layout: "api"
+page_title: "Databases - HTTP API"
+sidebar_current: "docs-http-secret-databases"
+description: |-
+  Top page for database secret backend information
+---
+
+# Database Secret Backend HTTP API
+
+This is the API documentation for the Vault Database secret backend. For
+general information about the usage and operation of the Database backend,
+please see the
+[Vault Database backend documentation](/docs/secrets/databases/index.html).
+
+This documentation assumes the Database backend is mounted at the
+`/database` path in Vault. Since it is possible to mount secret backends at
+any location, please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the connection string used to communicate with the
+desired database. In addition to the parameters listed here, each Database
+plugin has additional, database-plugin-specific parameters for this endpoint.
+Please read the HTTP API documentation for the plugin you wish to configure to
+see the full list of additional parameters.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+- `name` `(string: <required>)` – Specifies the name for this database
+  connection. This is specified as part of the URL.
+
+- `plugin_name` `(string: <required>)` - Specifies the name of the plugin to use
+  for this connection.
+
+- `verify_connection` `(bool: true)` – Specifies if the connection is verified
+  during initial configuration. Defaults to true.
+
+- `allowed_roles` `(slice: [])` - Array or comma-separated string of the roles
+  allowed to use this connection. Defaults to empty (no roles); if it contains
+  `"*"`, any role can use this connection.
+
+### Sample Payload
+
+```json
+{
+  "plugin_name": "mysql-database-plugin",
+  "allowed_roles": "readonly",
+  "connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/config/mysql
+```
+
+## Read Connection
+
+This endpoint returns the configuration settings for a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/database/config/:name`     | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to read.
+  This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request GET \
+    https://vault.rocks/v1/database/config/mysql
+```
+
+### Sample Response
+
+```json
+{
+  "data": {
+    "allowed_roles": [
+      "readonly"
+    ],
+    "connection_details": {
+      "connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
+    },
+    "plugin_name": "mysql-database-plugin"
+  }
+}
+```
+
+## Delete Connection
+
+This endpoint deletes a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to delete.
+  This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request DELETE \
+    https://vault.rocks/v1/database/config/mysql
+```
+
+## Reset Connection
+
+This endpoint closes a connection and its underlying plugin and restarts it
+with the configuration stored in the barrier.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/reset/:name`      | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to reset.
+  This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    https://vault.rocks/v1/database/reset/mysql
+```
+
+## Create Role
+
+This endpoint creates or updates a role definition.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/roles/:name`      | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create. This
+  is specified as part of the URL.
+
+- `db_name` `(string: <required>)` - The name of the database connection to use
+  for this role.
+
+- `default_ttl` `(string/int: 0)` - Specifies the TTL for the leases
+  associated with this role. Accepts time suffixed strings ("1h") or an integer
+  number of seconds. Defaults to system/backend default TTL time.
+
+- `max_ttl` `(string/int: 0)` - Specifies the maximum TTL for the leases
+  associated with this role. Accepts time suffixed strings ("1h") or an integer
+  number of seconds. Defaults to system/backend default TTL time.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+  statements executed to create and configure a user.
+  Must be a
+  semicolon-separated string, a base64-encoded semicolon-separated string, a
+  serialized JSON string array, or a base64-encoded serialized JSON string
+  array. The '{{name}}', '{{password}}' and '{{expiration}}' values will be
+  substituted.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+  be executed to revoke a user. Must be a semicolon-separated string, a
+  base64-encoded semicolon-separated string, a serialized JSON string array, or
+  a base64-encoded serialized JSON string array. The '{{name}}' value will be
+  substituted.
+
+- `rollback_statements` `(string: "")` – Specifies the database statements to be
+  executed to roll back a create operation in the event of an error. Not every
+  plugin type will support this functionality. Must be a semicolon-separated
+  string, a base64-encoded semicolon-separated string, a serialized JSON string
+  array, or a base64-encoded serialized JSON string array. The '{{name}}' value
+  will be substituted.
+
+- `renew_statements` `(string: "")` – Specifies the database statements to be
+  executed to renew a user. Not every plugin type will support this
+  functionality. Must be a semicolon-separated string, a base64-encoded
+  semicolon-separated string, a serialized JSON string array, or a
+  base64-encoded serialized JSON string array. The '{{name}}' and
+  '{{expiration}}' values will be substituted.
+
+### Sample Payload
+
+```json
+{
+  "db_name": "mysql",
+  "creation_statements": "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';",
+  "default_ttl": "1h",
+  "max_ttl": "24h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/roles/my-role
+```
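+
+As noted above, the statement parameters also accept a serialized JSON string
+array. The payload below is a hypothetical equivalent of the one above; note
+that the array itself is still passed as a string:
+
+```json
+{
+  "db_name": "mysql",
+  "creation_statements": "[\"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'\", \"GRANT SELECT ON *.* TO '{{name}}'@'%'\"]",
+  "default_ttl": "1h",
+  "max_ttl": "24h"
+}
+```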
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/database/roles/:name`      | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+  is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    https://vault.rocks/v1/database/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+  "data": {
+    "creation_statements": "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";",
+    "db_name": "mysql",
+    "default_ttl": 3600,
+    "max_ttl": 86400,
+    "renew_statements": "",
+    "revocation_statements": "",
+    "rollback_statements": ""
+  }
+}
+```
+
+## List Roles
+
+This endpoint returns a list of available roles. Only the role names are
+returned, not any values.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST`   | `/database/roles`            | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request LIST \
+    https://vault.rocks/v1/database/roles
+```
+
+### Sample Response
+
+```json
+{
+  "auth": null,
+  "data": {
+    "keys": ["dev", "prod"]
+  },
+  "lease_duration": 2764800,
+  "lease_id": "",
+  "renewable": false
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/database/roles/:name`      | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+  is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request DELETE \
+    https://vault.rocks/v1/database/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/database/creds/:name`      | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+  credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    https://vault.rocks/v1/database/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+  "data": {
+    "username": "root-1430158508-126",
+    "password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
+  }
+}
+```
diff --git a/website/source/api/secret/databases/mssql.html.md b/website/source/api/secret/databases/mssql.html.md
new file mode 100644
index 000000000..09893df45
--- /dev/null
+++ b/website/source/api/secret/databases/mssql.html.md
@@ -0,0 +1,60 @@
+---
+layout: "api"
+page_title: "MSSQL Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-mssql"
+description: |-
+  The MSSQL plugin for Vault's Database backend generates database credentials to access MSSQL servers.
+---
+
+# MSSQL Database Plugin HTTP API
+
+The MSSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MSSQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the MSSQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+  connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+  connections to the database. A zero uses the value of `max_open_connections`
+  and a negative value disables idle connections. If larger than
+  `max_open_connections` it will be reduced to be equal.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+  time a connection may be reused. If <= 0s, connections are reused forever.
+
+### Sample Payload
+
+```json
+{
+  "plugin_name": "mssql-database-plugin",
+  "allowed_roles": "readonly",
+  "connection_url": "sqlserver://sa:yourStrong(!)Password@localhost:1433",
+  "max_open_connections": 5,
+  "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/config/mssql
+```
+
diff --git a/website/source/api/secret/databases/mysql-maria.html.md b/website/source/api/secret/databases/mysql-maria.html.md
new file mode 100644
index 000000000..981506798
--- /dev/null
+++ b/website/source/api/secret/databases/mysql-maria.html.md
@@ -0,0 +1,60 @@
+---
+layout: "api"
+page_title: "MySQL/MariaDB Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-mysql-maria"
+description: |-
+  The MySQL/MariaDB plugin for Vault's Database backend generates database credentials to access MySQL and MariaDB servers.
+---
+
+# MySQL/MariaDB Database Plugin HTTP API
+
+The MySQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MySQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the MySQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+  connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+  connections to the database. A zero uses the value of `max_open_connections`
+  and a negative value disables idle connections. If larger than
+  `max_open_connections` it will be reduced to be equal.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+  time a connection may be reused. If <= 0s, connections are reused forever.
+
+### Sample Payload
+
+```json
+{
+  "plugin_name": "mysql-database-plugin",
+  "allowed_roles": "readonly",
+  "connection_url": "root:mysql@tcp(127.0.0.1:3306)/",
+  "max_open_connections": 5,
+  "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/config/mysql
+```
+
diff --git a/website/source/api/secret/databases/postgresql.html.md b/website/source/api/secret/databases/postgresql.html.md
new file mode 100644
index 000000000..5ff6b8022
--- /dev/null
+++ b/website/source/api/secret/databases/postgresql.html.md
@@ -0,0 +1,60 @@
+---
+layout: "api"
+page_title: "PostgreSQL Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-postgresql"
+description: |-
+  The PostgreSQL plugin for Vault's Database backend generates database credentials to access PostgreSQL servers.
+---
+
+# PostgreSQL Database Plugin HTTP API
+
+The PostgreSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the PostgreSQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/database/config/:name`     | `204 (empty body)`     |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the PostgreSQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+  connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+  connections to the database. A zero uses the value of `max_open_connections`
+  and a negative value disables idle connections. If larger than
+  `max_open_connections` it will be reduced to be equal.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+  time a connection may be reused. If <= 0s, connections are reused forever.
+
+### Sample Payload
+
+```json
+{
+  "plugin_name": "postgresql-database-plugin",
+  "allowed_roles": "readonly",
+  "connection_url": "postgresql://root:root@localhost:5432/postgres",
+  "max_open_connections": 5,
+  "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/database/config/postgresql
+```
+
diff --git a/website/source/api/system/plugins-catalog.html.md b/website/source/api/system/plugins-catalog.html.md
new file mode 100644
index 000000000..b95526194
--- /dev/null
+++ b/website/source/api/system/plugins-catalog.html.md
@@ -0,0 +1,155 @@
+---
+layout: "api"
+page_title: "/sys/plugins/catalog - HTTP API"
+sidebar_current: "docs-http-system-plugins-catalog"
+description: |-
+  The `/sys/plugins/catalog` endpoint is used to manage plugins.
+---
+
+# `/sys/plugins/catalog`
+
+The `/sys/plugins/catalog` endpoint is used to list, register, update, and
+remove plugins in Vault's catalog. Plugins must be registered before use, and
+once registered, backends can use the plugin by querying the catalog.
+
+## List Plugins
+
+This endpoint lists the plugins in the catalog.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST`   | `/sys/plugins/catalog/`      | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request LIST \
+    https://vault.rocks/v1/sys/plugins/catalog
+```
+
+### Sample Response
+
+```javascript
+{
+  "data": {
+    "keys": [
+      "cassandra-database-plugin",
+      "mssql-database-plugin",
+      "mysql-database-plugin",
+      "postgresql-database-plugin"
+    ]
+  }
+}
+```
+
+## Register Plugin
+
+This endpoint registers a new plugin, or updates an existing one with the
+supplied name.
+
+- **`sudo` required** – This endpoint requires `sudo` capability in addition to
+  any path-specific capabilities.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `PUT`    | `/sys/plugins/catalog/:name` | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name for this plugin. The name
+  is what is used to look up plugins in the catalog. This is part of the request
+  URL.
+
+- `sha_256` `(string: <required>)` – This is the SHA256 sum of the plugin's
+  binary, as computed by a tool such as `sha256sum`. Before a plugin is run,
+  its SHA256 sum will be checked against this value; if they do not match, the
+  plugin cannot be run.
+
+- `command` `(string: <required>)` – Specifies the command used to execute the
+  plugin. This is relative to the plugin directory. e.g. `"myplugin
+  --my_flag=1"`
+
+### Sample Payload
+
+```json
+{
+  "sha_256": "d130b9a0fbfddef9709d8ff92e5e6053ccd246b78632fc03b8548457026961e9",
+  "command": "mysql-database-plugin"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request PUT \
+    --data @payload.json \
+    https://vault.rocks/v1/sys/plugins/catalog/example-plugin
+```
+
+## Read Plugin
+
+This endpoint returns the configuration data for the plugin with the given name.
+
+- **`sudo` required** – This endpoint requires `sudo` capability in addition to
+  any path-specific capabilities.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/sys/plugins/catalog/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the plugin to retrieve.
+  This is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request GET \
+    https://vault.rocks/v1/sys/plugins/catalog/example-plugin
+```
+
+### Sample Response
+
+```javascript
+{
+  "data": {
+    "plugin": {
+      "args": [],
+      "builtin": false,
+      "command": "/tmp/vault-plugins/mysql-database-plugin",
+      "name": "example-plugin",
+      "sha256": "0TC5oPv93vlwnY/5Ll5gU8zSRreGMvwDuFSEVwJpYek="
+    }
+  }
+}
+```
+
+## Remove Plugin from Catalog
+
+This endpoint removes the plugin with the given name.
+
+- **`sudo` required** – This endpoint requires `sudo` capability in addition to
+  any path-specific capabilities.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/sys/plugins/catalog/:name` | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the plugin to delete.
+  This is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request DELETE \
+    https://vault.rocks/v1/sys/plugins/catalog/example-plugin
+```
diff --git a/website/source/docs/internals/plugins.html.md b/website/source/docs/internals/plugins.html.md
new file mode 100644
index 000000000..600bc034e
--- /dev/null
+++ b/website/source/docs/internals/plugins.html.md
@@ -0,0 +1,105 @@
+---
+layout: "docs"
+page_title: "Plugin System"
+sidebar_current: "docs-internals-plugins"
+description: |-
+  Learn about Vault's plugin system.
+---
+
+# Plugin System
+Certain Vault backends utilize plugins to extend their functionality outside of
+what is available in the core Vault code. Oftentimes these backends will
+provide both builtin plugins and a mechanism for executing external plugins.
+Builtin plugins are shipped with Vault, often for commonly used implementations,
+and require no additional operator intervention to run. Builtin plugins are
+just like any other backend code inside Vault. External plugins, on the other
+hand, are not shipped with the Vault binary and must be registered with Vault by
+a privileged Vault user. This section of the documentation will describe the
+architecture and security of external plugins.
+
+# Plugin Architecture
+Vault's plugins are completely separate, standalone applications that Vault
+executes and communicates with over RPC. This means the plugin process does not
+share the same memory space as Vault and therefore can only access the
+interfaces and arguments given to it. This also means a crash in a plugin cannot
+crash the entirety of Vault.
+
+## Plugin Communication
+Vault creates a mutually authenticated TLS connection for communication with the
+plugin's RPC server. While invoking the plugin process, Vault passes a [wrapping
+token](https://www.vaultproject.io/docs/concepts/response-wrapping.html) to the
+plugin process' environment. This token is single use and has a short TTL.
+Once unwrapped, it provides the plugin with a unique generated TLS certificate
+and private key for it to use to talk to the original Vault process.
+
+## Plugin Registration
+An important consideration of Vault's plugin system is to ensure the plugin
+invoked by Vault is authentic and maintains integrity. There are two components
+that a Vault operator needs to configure before external plugins can be run: the
+plugin directory and the plugin catalog entry.
+
+### Plugin Directory
+The plugin directory is a configuration option of Vault, and can be specified in
+the [configuration file](https://www.vaultproject.io/docs/configuration/index.html).
+This setting specifies the directory in which all plugin binaries must live. A
+plugin cannot be added to Vault unless it exists in the plugin directory. There
+is no default for this configuration option, and if it is not set, plugins
+cannot be added to Vault.
+
+~> Warning: A Vault operator should take care to lock down the permissions on
+this directory to ensure a plugin cannot be modified by an unauthorized user
+between the time of the SHA check and the time of plugin execution.
+
+### Plugin Catalog
+The plugin catalog is Vault's list of approved plugins. The catalog is stored in
+Vault's barrier and can only be updated by a Vault user with sudo permissions.
+Upon adding a new plugin, the plugin name, SHA256 sum of the executable, and the
+command that should be used to run the plugin must be provided. The catalog will
+make sure the executable referenced in the command exists in the plugin
+directory. When added to the catalog, the plugin is not automatically executed;
+it instead becomes visible to backends and can be executed by them.
+
+### Plugin Execution
+When a backend wants to run a plugin, it first looks up the plugin, by name, in
+the catalog. It then checks the executable's SHA256 sum against the one
+configured in the plugin catalog. Finally, Vault runs the command configured in
+the catalog, sending along the JWT formatted response wrapping token and mlock
+settings (like Vault, plugins support the use of mlock when available).
+
+# Plugin Development
+Because Vault communicates with plugins over an RPC interface, you can build and
+distribute a plugin for Vault without having to rebuild Vault itself. This makes
+it easy for you to build a Vault plugin for your organization's internal use,
+for a proprietary API that you don't want to open source, or to prototype
+something before contributing it back to the main project.
+
+In theory, because the plugin interface is RPC, you could even develop a plugin
+using a completely different programming language! (Disclaimer, you would also
+have to re-implement the plugin API which is not a trivial amount of work.)
+
+~> Advanced topic! Plugin development is a highly advanced topic in Vault, and
+is not required knowledge for day-to-day usage. If you don't plan on writing any
+plugins, we recommend not reading this section of the documentation.
+
+Developing a plugin is simple. The only knowledge necessary to write
+a plugin is basic command-line skills and basic knowledge of the
+[Go programming language](http://golang.org).
+
+Your plugin implementation just needs to satisfy the interface for the plugin
+type you want to build. You can find these definitions in the docs for the
+backend running the plugin.
+
+```go
+package main
+
+import (
+	"github.com/hashicorp/vault/plugins"
+)
+
+func main() {
+	plugins.Serve(new(MyPlugin), nil)
+}
+```
+
+And that's basically it!
+You would just need to change MyPlugin to your actual
+plugin.
diff --git a/website/source/docs/secrets/databases/cassandra.html.md b/website/source/docs/secrets/databases/cassandra.html.md
new file mode 100644
index 000000000..0e29d0300
--- /dev/null
+++ b/website/source/docs/secrets/databases/cassandra.html.md
@@ -0,0 +1,62 @@
+---
+layout: "docs"
+page_title: "Cassandra Database Plugin"
+sidebar_current: "docs-secrets-databases-cassandra"
+description: |-
+  The Cassandra plugin for Vault's Database backend generates database credentials to access Cassandra.
+---
+
+# Cassandra Database Plugin
+
+Name: `cassandra-database-plugin`
+
+The Cassandra Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the Cassandra database.
+
+See the [Database Backend](/docs/secrets/databases/index.html) docs for more
+information about setting up the Database Backend.
+
+## Quick Start
+
+After the Database Backend is mounted you can configure a cassandra connection
+by specifying this plugin as the `"plugin_name"` argument. Here is an example
+cassandra configuration:
+
+```
+$ vault write database/config/cassandra \
+    plugin_name=cassandra-database-plugin \
+    allowed_roles="readonly" \
+    hosts=localhost \
+    username=cassandra \
+    password=cassandra
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
+Once the cassandra connection is configured we can add a role:
+
+```
+$ vault write database/roles/readonly \
+    db_name=cassandra \
+    creation_statements="CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; \
+    GRANT SELECT ON ALL KEYSPACES TO {{username}};" \
+    default_ttl="1h" \
+    max_ttl="24h"
+
+Success! Data written to: database/roles/readonly
+```
+
+This role can be used to retrieve a new set of credentials by querying the
+"database/creds/readonly" endpoint.
+
+## API
+
+The full list of configurable options can be seen in the [Cassandra database
+plugin API](/api/secret/databases/cassandra.html) page.
+
+For more information on the Database secret backend's HTTP API please see the [Database secret
+backend API](/api/secret/databases/index.html) page.
+
diff --git a/website/source/docs/secrets/databases/custom.html.md b/website/source/docs/secrets/databases/custom.html.md
new file mode 100644
index 000000000..7d21a19c9
--- /dev/null
+++ b/website/source/docs/secrets/databases/custom.html.md
@@ -0,0 +1,121 @@
+---
+layout: "docs"
+page_title: "Custom Database Plugins"
+sidebar_current: "docs-secrets-databases-custom"
+description: |-
+  Creating custom database plugins for Vault's Database backend to generate credentials for a database.
+---
+
+# Custom Database Plugins
+
+The Database backend allows new functionality to be added through a plugin
+interface without needing to modify Vault's core code. This allows you to write
+your own code to generate credentials in any database you wish. It also allows
+databases that require dynamically linked libraries to be used as plugins while
+keeping Vault itself statically linked.
+
+~> **Advanced topic!** Plugin development is a highly advanced
+topic in Vault, and is not required knowledge for day-to-day usage.
+If you don't plan on writing any plugins, we recommend not reading
+this section of the documentation.
+
+Please read the [Plugins internals](/docs/internals/plugins.html) docs for more
+information about the plugin system before getting started building your
+Database plugin.
+
+## Plugin Interface
+
+All plugins for the Database backend must implement the same simple interface.
+
+```go
+type Database interface {
+	Type() (string, error)
+	CreateUser(statements Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error)
+	RenewUser(statements Statements, username string, expiration time.Time) error
+	RevokeUser(statements Statements, username string) error
+
+	Initialize(config map[string]interface{}, verifyConnection bool) error
+	Close() error
+}
+```
+
+You'll notice the first parameter to a number of those functions is a
+`Statements` struct. This struct is used to pass the Role's configured
+statements to the plugin on function call. The struct is defined as:
+
+```go
+type Statements struct {
+	CreationStatements   string
+	RevocationStatements string
+	RollbackStatements   string
+	RenewStatements      string
+}
+```
+
+It is up to your plugin to replace the `{{name}}`, `{{password}}`, and
+`{{expiration}}` in these statements with the proper values.
+
+The `Initialize` function is passed a map of keys to values; this data is what
+the user specified as the configuration for the plugin. Your plugin should use
+this data to make connections to the database. It is also passed a boolean
+value specifying whether or not your plugin should return an error if it is
+unable to connect to the database.
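+
+For illustration, here is a minimal skeleton that satisfies this interface. It
+is a sketch, not a canonical implementation: it assumes the interface and the
+`Statements` struct live in the `dbplugin` package used by the Database
+backend, and `MyPlugin`, its `"myplugin"` type name, and the placeholder
+username/password logic are all hypothetical:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+	"github.com/hashicorp/vault/plugins"
+)
+
+// MyPlugin is a stub database implementation; swap the method bodies
+// below for real calls against your database.
+type MyPlugin struct {
+	// config holds the connection details passed to Initialize.
+	config map[string]interface{}
+}
+
+// Type returns the name of this database type.
+func (p *MyPlugin) Type() (string, error) { return "myplugin", nil }
+
+// Initialize stores the user-supplied configuration. A real plugin would
+// open its connection here and, when verifyConnection is true, return an
+// error if the database is unreachable.
+func (p *MyPlugin) Initialize(config map[string]interface{}, verifyConnection bool) error {
+	p.config = config
+	return nil
+}
+
+// CreateUser is where a real plugin would substitute {{name}}, {{password}},
+// and {{expiration}} into statements.CreationStatements and execute them.
+func (p *MyPlugin) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (string, string, error) {
+	username := fmt.Sprintf("%s-%d", usernamePrefix, time.Now().UnixNano())
+	password := "replace-with-a-randomly-generated-password"
+	return username, password, nil
+}
+
+// RenewUser would run statements.RenewStatements if the database needs them.
+func (p *MyPlugin) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+	return nil
+}
+
+// RevokeUser would run statements.RevocationStatements for the given user.
+func (p *MyPlugin) RevokeUser(statements dbplugin.Statements, username string) error {
+	return nil
+}
+
+// Close releases any open connections.
+func (p *MyPlugin) Close() error { return nil }
+
+func main() {
+	plugins.Serve(new(MyPlugin), nil)
+}
+```
+
+(The `main` function above is the same `Serve` wiring described in the next
+section.)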
+
+## Serving your plugin
+
+Once your plugin is built you should pass it to Vault's `plugins` package by
+calling the `Serve` method:
+
+```go
+package main
+
+import (
+	"github.com/hashicorp/vault/plugins"
+)
+
+func main() {
+	plugins.Serve(new(MyPlugin), nil)
+}
+```
+
+Replacing `MyPlugin` with the actual implementation of your plugin.
+
+The second parameter to `Serve` takes in an optional Vault `api.TLSConfig` for
+configuring the plugin to communicate with Vault for the initial unwrap call.
+This is useful if your Vault setup requires client certificate checks. This
+config won't be used once the plugin unwraps its own TLS cert and key.
+
+## Running your plugin
+
+The above main package, once built, will supply you with a binary of your
+plugin. We also recommend if you are planning on distributing your plugin to
+build with [gox](https://github.com/mitchellh/gox) for cross platform builds.
+
+To use your plugin with the Database backend you need to place the binary in the
+plugin directory as specified in the [plugin internals](/docs/internals/plugins.html) docs.
+
+You should now be able to register your plugin into the Vault catalog. To do
+this your token will need sudo permissions.
+
+```
+$ vault write sys/plugins/catalog/myplugin-database-plugin \
+    sha_256=<SHA256 sum of the plugin binary> \
+    command="myplugin"
+Success! Data written to: sys/plugins/catalog/myplugin-database-plugin
+```
+
+Now you should be able to configure your plugin like any other:
+
+```
+$ vault write database/config/myplugin \
+    plugin_name=myplugin-database-plugin \
+    allowed_roles="readonly" \
+    myplugins_connection_details=....
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
diff --git a/website/source/docs/secrets/databases/index.html.md b/website/source/docs/secrets/databases/index.html.md
new file mode 100644
index 000000000..c88699c44
--- /dev/null
+++ b/website/source/docs/secrets/databases/index.html.md
@@ -0,0 +1,97 @@
+---
+layout: "docs"
+page_title: "Databases"
+sidebar_current: "docs-secrets-databases"
+description: |-
+  Top page for database secret backend information
+---
+
+# Databases
+
+Name: `Database`
+
+The Database secret backend for Vault generates database credentials dynamically
+based on configured roles. It works with a number of different databases through
+a plugin interface. There are a number of builtin database types and an exposed
+framework for running custom database types for extensibility. This means that
+services that need to access a database no longer need to hardcode credentials:
+they can request them from Vault, and use Vault's leasing mechanism to more
+easily roll keys.
+
+Additionally, it introduces a new ability: with every service accessing the
+database with unique credentials, it makes auditing much easier when
+questionable data access is discovered: you can track it down to the specific
+instance of a service based on the SQL username.
+
+Vault makes use of its own internal revocation system to ensure that users
+become invalid within a reasonable time of the lease expiring.
+
+This page will show a quick start for this backend. For detailed documentation
+on every path, use `vault path-help` after mounting the backend.
+
+## Quick Start
+
+The first step in using the Database backend is mounting it.
+
+```text
+$ vault mount database
+Successfully mounted 'database' at 'database'!
+```
+
+Next, we must configure this backend to connect to a database. In this example
+we will connect to a MySQL database, but the configuration details needed for
+other plugin types can be found in their docs pages. This backend can configure
+multiple database connections; therefore, a name for the connection must be
+provided. We'll call this one simply "mysql".
+
+```
+$ vault write database/config/mysql \
+    plugin_name=mysql-database-plugin \
+    connection_url="root:mysql@tcp(127.0.0.1:3306)/" \
+    allowed_roles="readonly"
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
+The next step is to configure a role. A role is a logical name that maps to a
+policy used to generate those credentials. A role needs to be configured with
+the database name we created above, and the default/max TTLs. For example, let's
+create a "readonly" role:
+
+```
+$ vault write database/roles/readonly \
+    db_name=mysql \
+    creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \
+    default_ttl="1h" \
+    max_ttl="24h"
+Success! Data written to: database/roles/readonly
+```
+
+By writing to the roles/readonly path we are defining the readonly role. This
+role will be created by evaluating the given creation statements. By default,
+the {{name}} and {{password}} fields will be populated by the plugin with
+dynamically generated values. In other plugins, the {{expiration}} field could
+also be supported. This SQL statement is creating the named user, and then
+granting it SELECT or read-only privileges to tables in the database. More
+complex GRANT queries can be used to customize the privileges of the role.
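+
+For instance, a hypothetical role scoped to a single application database (the
+`app-readonly` role name and `myapp` schema here are illustrative) could look
+like:
+
+```
+$ vault write database/roles/app-readonly \
+    db_name=mysql \
+    creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON myapp.* TO '{{name}}'@'%';" \
+    default_ttl="1h" \
+    max_ttl="24h"
+```
+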
+Custom revocation statements can be passed too, as in the sketch above, but
+this plugin has a default statement we can use.
+
+To generate a new set of credentials, we simply read from that role:
+
+```
+$ vault read database/creds/readonly
+Key              Value
+---              -----
+lease_id         database/creds/readonly/2f6a614c-4aa2-7b19-24b9-ad944a8d4de6
+lease_duration   1h0m0s
+lease_renewable  true
+password         8cab931c-d62e-a73d-60d3-5ee85139cd66
+username         v-root-e2978cd0-
+```
+
+## API
+
+The Database secret backend has a full HTTP API. Please see the [Database
+secret backend API](/api/secret/databases/index.html) for more details.
+
diff --git a/website/source/docs/secrets/databases/mssql.html.md b/website/source/docs/secrets/databases/mssql.html.md
new file mode 100644
index 000000000..889e35a4f
--- /dev/null
+++ b/website/source/docs/secrets/databases/mssql.html.md
@@ -0,0 +1,60 @@
+---
+layout: "docs"
+page_title: "MSSQL Database Plugin"
+sidebar_current: "docs-secrets-databases-mssql"
+description: |-
+  The MSSQL plugin for Vault's Database backend generates database credentials to access Microsoft SQL Server.
+---
+
+# MSSQL Database Plugin
+
+Name: `mssql-database-plugin`
+
+The MSSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MSSQL database.
+
+See the [Database Backend](/docs/secrets/databases/index.html) docs for more
+information about setting up the Database Backend.
+
+## Quick Start
+
+After the Database Backend is mounted you can configure an MSSQL connection
+by specifying this plugin as the `"plugin_name"` argument. Here is an example
+configuration:
+
+```
+$ vault write database/config/mssql \
+    plugin_name=mssql-database-plugin \
+    connection_url='sqlserver://sa:yourStrong(!)Password@localhost:1433' \
+    allowed_roles="readonly"
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
+Once the MSSQL connection is configured we can add a role:
+
+```
+$ vault write database/roles/readonly \
+    db_name=mssql \
+    creation_statements="CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';\
+        CREATE USER [{{name}}] FOR LOGIN [{{name}}];\
+        GRANT SELECT ON SCHEMA::dbo TO [{{name}}];" \
+    default_ttl="1h" \
+    max_ttl="24h"
+
+Success! Data written to: database/roles/readonly
+```
+
+This role can now be used to retrieve a new set of credentials by querying the
+"database/creds/readonly" endpoint.
+
+## API
+
+The full list of configurable options can be seen in the [MSSQL database
+plugin API](/api/secret/databases/mssql.html) page.
+
+For more information on the Database secret backend's HTTP API please see the
+[Database secret backend API](/api/secret/databases/index.html) page.
+
diff --git a/website/source/docs/secrets/databases/mysql-maria.html.md b/website/source/docs/secrets/databases/mysql-maria.html.md
new file mode 100644
index 000000000..0d7c49748
--- /dev/null
+++ b/website/source/docs/secrets/databases/mysql-maria.html.md
@@ -0,0 +1,69 @@
+---
+layout: "docs"
+page_title: "MySQL/MariaDB Database Plugin"
+sidebar_current: "docs-secrets-databases-mysql-maria"
+description: |-
+  The MySQL/MariaDB plugin for Vault's Database backend generates database credentials to access MySQL and MariaDB servers.
+---
+
+# MySQL/MariaDB Database Plugin
+
+Name: `mysql-database-plugin`, `mysql-aurora-database-plugin`,
+`mysql-rds-database-plugin`, `mysql-legacy-database-plugin`
+
+The MySQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MySQL database.
+
+This plugin has a few different instances built into Vault; each instance is
+for a slightly different MySQL driver. The only difference between these
+plugins is the length of usernames generated by the plugin, as different
+versions of MySQL accept different lengths. The available plugins are:
+
+ - mysql-database-plugin
+ - mysql-aurora-database-plugin
+ - mysql-rds-database-plugin
+ - mysql-legacy-database-plugin
+
+See the [Database Backend](/docs/secrets/databases/index.html) docs for more
+information about setting up the Database Backend.
+
+## Quick Start
+
+After the Database Backend is mounted you can configure a MySQL connection
+by specifying this plugin as the `"plugin_name"` argument. Here is an example
+configuration:
+
+```
+$ vault write database/config/mysql \
+    plugin_name=mysql-database-plugin \
+    connection_url="root:mysql@tcp(127.0.0.1:3306)/" \
+    allowed_roles="readonly"
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
+Once the MySQL connection is configured we can add a role:
+
+```
+$ vault write database/roles/readonly \
+    db_name=mysql \
+    creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \
+    default_ttl="1h" \
+    max_ttl="24h"
+
+Success! Data written to: database/roles/readonly
+```
+
+This role can now be used to retrieve a new set of credentials by querying the
+"database/creds/readonly" endpoint.
+
+## API
+
+The full list of configurable options can be seen in the [MySQL database
+plugin API](/api/secret/databases/mysql-maria.html) page.
+
+For more information on the Database secret backend's HTTP API please see the
+[Database secret backend API](/api/secret/databases/index.html) page.
+
diff --git a/website/source/docs/secrets/databases/postgresql.html.md b/website/source/docs/secrets/databases/postgresql.html.md
new file mode 100644
index 000000000..b2c0c7bb2
--- /dev/null
+++ b/website/source/docs/secrets/databases/postgresql.html.md
@@ -0,0 +1,60 @@
+---
+layout: "docs"
+page_title: "PostgreSQL Database Plugin"
+sidebar_current: "docs-secrets-databases-postgresql"
+description: |-
+  The PostgreSQL plugin for Vault's Database backend generates database credentials to access PostgreSQL.
+---
+
+# PostgreSQL Database Plugin
+
+Name: `postgresql-database-plugin`
+
+The PostgreSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the PostgreSQL database.
+
+See the [Database Backend](/docs/secrets/databases/index.html) docs for more
+information about setting up the Database Backend.
+
+## Quick Start
+
+After the Database Backend is mounted you can configure a PostgreSQL connection
+by specifying this plugin as the `"plugin_name"` argument. Here is an example
+configuration:
+
+```
+$ vault write database/config/postgresql \
+    plugin_name=postgresql-database-plugin \
+    allowed_roles="readonly" \
+    connection_url="postgresql://root:root@localhost:5432/"
+
+The following warnings were returned from the Vault server:
+* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.
+```
+
+Once the PostgreSQL connection is configured we can add a role. The PostgreSQL
+plugin replaces `{{expiration}}` in statements with a formatted timestamp:
+
+```
+$ vault write database/roles/readonly \
+    db_name=postgresql \
+    creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \
+        GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";" \
+    default_ttl="1h" \
+    max_ttl="24h"
+
+Success! Data written to: database/roles/readonly
+```
+
+This role can be used to retrieve a new set of credentials by querying the
+"database/creds/readonly" endpoint.
+
+## API
+
+The full list of configurable options can be seen in the [PostgreSQL database
+plugin API](/api/secret/databases/postgresql.html) page.
+
+For more information on the Database secret backend's HTTP API please see the
+[Database secret backend API](/api/secret/databases/index.html) page.
+
diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb
index 989c60bd8..799e461d0 100644
--- a/website/source/layouts/api.erb
+++ b/website/source/layouts/api.erb
@@ -21,7 +21,7 @@
 AWS
 >
- Cassandra
+ Cassandra (Deprecated)
 >
 Consul
@@ -29,6 +29,25 @@
 >
 Cubbyhole
+
+ >
+ Databases (Beta)
+
+
+ >
 Generic
@@ -36,16 +55,16 @@
 MongoDB
 >
- MSSQL
+ MSSQL (Deprecated)
 >
- MySQL
+ MySQL (Deprecated)
 >
 PKI
 >
- PostgreSQL
+ PostgreSQL (Deprecated)
 >
 RabbitMQ
@@ -107,6 +126,9 @@
 >
 /sys/mounts
+ >
+ /sys/plugins/catalog
+
 >
 /sys/policy
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 3ff5dc531..ac6d96cbb 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -35,6 +35,10 @@
 >
 Replication
+
+ >
+ Plugins
+
@@ -204,7 +208,7 @@
 >
- Cassandra
+ Cassandra (Deprecated)
 >
@@ -215,6 +219,27 @@
 Cubbyhole
+ >
+ Databases (Beta)
+
+
+ >
 Generic
@@ -224,11 +249,11 @@
 >
- MSSQL
+ MSSQL (Deprecated)
 >
- MySQL
+ MySQL (Deprecated)
 >
@@ -236,7 +261,7 @@
 >
- PostgreSQL
+ PostgreSQL (Deprecated)
 >