diff --git a/Makefile b/Makefile index b5f77760c..68a091393 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ SED?=$(shell command -v gsed || command -v sed) GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version) -PROTOC_VERSION_MIN=3.21.7 +PROTOC_VERSION_MIN=3.21.9 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) @@ -186,6 +186,8 @@ proto: bootstrap protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto + protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/capabilities/meta/*.proto + protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/capabilities/link_control/*.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/proto/node_status/*.proto # No additional sed expressions should be added to this list. Going forward diff --git a/changelog/18228.txt b/changelog/18228.txt new file mode 100644 index 000000000..4f1b6d104 --- /dev/null +++ b/changelog/18228.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) +``` diff --git a/command/server.go b/command/server.go index 9f070a6c0..15c379364 100644 --- a/command/server.go +++ b/command/server.go @@ -1708,7 +1708,7 @@ func (c *ServerCommand) configureLogging(config *server.Config) (hclog.Intercept return loghelper.Setup(logCfg, c.logWriter) } -func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.WrappedHCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.WrappedHCPLinkVault, error) { +func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { // trigger a shutdown if hcpLinkVault != nil { err := hcpLinkVault.Shutdown() diff --git a/command/server/hcp_link_config_test.go b/command/server/hcp_link_config_test.go new file mode 100644 index 000000000..f71c96d76 --- /dev/null +++ b/command/server/hcp_link_config_test.go @@ -0,0 +1,47 @@ +package server + +import ( + "testing" + + "github.com/go-test/deep" + sdkResource "github.com/hashicorp/hcp-sdk-go/resource" + "github.com/hashicorp/vault/internalshared/configutil" +) + +func TestHCPLinkConfig(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/hcp_link_config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + resIDRaw := "organization/bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff/project/1c78e888-2142-4000-8918-f933bbbc7690/hashicorp.example.resource/example" + res, _ := sdkResource.FromString(resIDRaw) + + expected := &Config{ + Storage: &Storage{ + Type: "inmem", + Config: map[string]string{}, + }, + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8200", + TLSDisable: true, + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + HCPLinkConf: &configutil.HCPLinkConfig{ + ResourceIDRaw: resIDRaw, + Resource: &res, + ClientID: "J2TtcSYOyPUkPV2z0mSyDtvitxLVjJmu", + ClientSecret: 
"N9JtHZyOnHrIvJZs82pqa54vd4jnkyU3xCcqhFXuQKJZZuxqxxbP1xCfBZVB82vY", + }, + DisableMlock: true, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/command/server/test-fixtures/hcp_link_config.hcl b/command/server/test-fixtures/hcp_link_config.hcl new file mode 100644 index 000000000..fc25b760e --- /dev/null +++ b/command/server/test-fixtures/hcp_link_config.hcl @@ -0,0 +1,11 @@ +storage "inmem" {} +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true +} +cloud { + resource_id = "organization/bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff/project/1c78e888-2142-4000-8918-f933bbbc7690/hashicorp.example.resource/example" + client_id = "J2TtcSYOyPUkPV2z0mSyDtvitxLVjJmu" + client_secret = "N9JtHZyOnHrIvJZs82pqa54vd4jnkyU3xCcqhFXuQKJZZuxqxxbP1xCfBZVB82vY" +} +disable_mlock = true \ No newline at end of file diff --git a/go.mod b/go.mod index 2c1e38204..c6bd0f293 100644 --- a/go.mod +++ b/go.mod @@ -326,11 +326,15 @@ require ( github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect github.com/hashicorp/go-slug v0.7.0 // indirect github.com/hashicorp/go-tfe v0.20.0 // indirect + github.com/hashicorp/hcp-link v0.1.0 // indirect + github.com/hashicorp/hcp-scada-provider v0.1.0 // indirect github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect + github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 // indirect github.com/hashicorp/serf v0.9.7 // indirect github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect + github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221202210228-12b2fab87559 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect github.com/huandu/xstrings v1.3.2 // indirect @@ -363,6 +367,7 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mediocregopher/radix/v4 v4.1.1 // indirect github.com/miekg/dns v1.1.41 // indirect + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect @@ -391,6 +396,7 @@ require ( github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/snowflakedb/gosnowflake v1.6.3 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect diff --git a/go.sum b/go.sum index 76c90a41a..5276698c6 100644 --- a/go.sum +++ b/go.sum @@ -1080,6 +1080,10 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/hcp-link v0.1.0 h1:F6F1cpADc+o5EBI5CbJn5RX4qdFSLpuA4fN69eeE5lQ= +github.com/hashicorp/hcp-link v0.1.0/go.mod h1:BWVDuJDHrKJtWc5qI07bX5xlLjSgWq6kYLQUeG1g5dM= 
+github.com/hashicorp/hcp-scada-provider v0.1.0 h1:FSjTw7EBl6GJFv5533harm1vw15OaEYodNGHde908MI= +github.com/hashicorp/hcp-scada-provider v0.1.0/go.mod h1:8Pp3pBLzZ9DL56OHSbf55qhh+TpvmXBuR5cJx9jcdcA= github.com/hashicorp/hcp-sdk-go v0.22.0 h1:LWkLOkJFYWSojBM3IkwvYK6nrwrL+p4Fw8zEaoCQG10= github.com/hashicorp/hcp-sdk-go v0.22.0/go.mod h1:mM3nYdVHuv2X2tv88MGVKRf/o2k3zF8jUZSMkwICQ28= github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0= @@ -1092,6 +1096,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= +github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 h1:fo8EbQ6tc9hYqxik9CAdFMqy48TW8hh2I3znysPqf+0= github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28/go.mod h1:FslB+3eLbZgkuPWffqO1GeNzBFw1SuVqN2PXsMNe0Fg= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= @@ -1164,6 +1170,8 @@ github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0/go.mod h1:GzYAJYytgbN github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE= github.com/hashicorp/vault-testing-stepwise v0.1.2 h1:3obC/ziAPGnsz2IQxr5e4Ayb7tu7WL6pm6mmZ5gwhhs= github.com/hashicorp/vault-testing-stepwise v0.1.2/go.mod h1:TeU6B+5NqxUjto+Zey+QQEH1iywuHn0ciHZNYh4q3uI= +github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221202210228-12b2fab87559 h1:fCQL4JFn/iQJsM3eQPDGtiGxEXEE+A3Yv4TBwk8IJAA= +github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221202210228-12b2fab87559/go.mod h1:hsrm1EXNYpQwErumOLeaTY74+Tj7poOwckbR4d+0hEQ= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -1403,6 +1411,8 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -1698,6 +1708,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index 2182e12cb..caf9270f5 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: helper/forwarding/types.proto package forwarding diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go index 8de54d5f9..5a1d8e98e 100644 --- a/helper/identity/mfa/types.pb.go +++ b/helper/identity/mfa/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: helper/identity/mfa/types.proto package mfa diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index e19edfbde..c4a4dadf6 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: helper/identity/types.proto package identity diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 1d4d87b6b..c602303af 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: helper/storagepacker/types.proto package storagepacker diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go index b35293109..8ca994a31 100644 --- a/physical/raft/types.pb.go +++ b/physical/raft/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: physical/raft/types.proto package raft diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index df7a138e0..ae0dbd723 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/database/dbplugin/database.proto package dbplugin diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go index dd595c904..3789a51c1 100644 --- a/sdk/database/dbplugin/v5/proto/database.pb.go +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/database/dbplugin/v5/proto/database.proto package proto diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go index 96963af3e..cfd463d6a 100644 --- a/sdk/helper/pluginutil/multiplexing.pb.go +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go index 18af6e682..42c722afe 100644 --- a/sdk/logical/identity.pb.go +++ b/sdk/logical/identity.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/logical/identity.proto package logical diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go index 9be723e14..f3a9ec52c 100644 --- a/sdk/logical/plugin.pb.go +++ b/sdk/logical/plugin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/logical/plugin.proto package logical diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go index 415970f19..fb3ce8121 100644 --- a/sdk/logical/version.pb.go +++ b/sdk/logical/version.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/logical/version.proto package logical diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index 88d55809c..fd6971d8f 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: sdk/plugin/pb/backend.proto package pb diff --git a/vault/activity/activity_log.pb.go b/vault/activity/activity_log.pb.go index b61e76aad..55346e6b7 100644 --- a/vault/activity/activity_log.pb.go +++ b/vault/activity/activity_log.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: vault/activity/activity_log.proto package activity diff --git a/vault/external_tests/hcp_link/hcp_link_test.go b/vault/external_tests/hcp_link/hcp_link_test.go new file mode 100644 index 000000000..daedd3ad6 --- /dev/null +++ b/vault/external_tests/hcp_link/hcp_link_test.go @@ -0,0 +1,35 @@ +package hcp_link + +import ( + "testing" + + scada "github.com/hashicorp/hcp-scada-provider" + "github.com/hashicorp/vault/vault" +) + +func TestHCPLinkConnected(t *testing.T) { + t.Parallel() + cluster := getTestCluster(t, 2) + defer cluster.Cleanup() + + vaultHCPLink, _ := TestClusterWithHCPLinkEnabled(t, cluster, false, false) + defer vaultHCPLink.Cleanup() + + for _, core := range cluster.Cores { + checkLinkStatus(core.Client, scada.SessionStatusConnected, t) + } +} + +func TestHCPLinkNotConfigured(t *testing.T) { + t.Parallel() + cluster := getTestCluster(t, 2) + defer cluster.Cleanup() + + cluster.Start() + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + + for _, core := range cluster.Cores { + checkLinkStatus(core.Client, "", t) + } +} diff --git a/vault/external_tests/hcp_link/test_helpers.go b/vault/external_tests/hcp_link/test_helpers.go new file mode 100644 index 000000000..4b729b0b0 --- /dev/null +++ b/vault/external_tests/hcp_link/test_helpers.go @@ -0,0 +1,138 @@ +package hcp_link + +import ( + "os" + "testing" + "time" + + sdkResource "github.com/hashicorp/hcp-sdk-go/resource" + "github.com/hashicorp/vault/api" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link" +) + +type VaultHCPLinkInstances struct { + instances []*hcp_link.HCPLinkVault +} + +func NewVaultHCPLinkInstances() *VaultHCPLinkInstances { + i := &VaultHCPLinkInstances{ + instances: make([]*hcp_link.HCPLinkVault, 0), + } + + return i +} + +func (v *VaultHCPLinkInstances) Cleanup() { + for _, inst := range v.instances { + inst.Shutdown() + } +} + +func getHCPConfig(t *testing.T, clientID, clientSecret string) *configutil.HCPLinkConfig { + resourceIDRaw, ok := os.LookupEnv("HCP_RESOURCE_ID") + if !ok { + t.Skip("failed to find the HCP resource ID") + } + res, err := sdkResource.FromString(resourceIDRaw) + if err != nil { + t.Fatalf("failed to parse the resource ID, %v", err.Error()) + } + return &configutil.HCPLinkConfig{ + ResourceIDRaw: resourceIDRaw, + Resource: &res, + ClientID: clientID, + ClientSecret: clientSecret, + } +} + +func getTestCluster(t *testing.T, numCores int) *vault.TestCluster { + t.Helper() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": credUserpass.Factory, + }, + } + + if numCores <= 0 { + numCores = 1 + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: numCores, + }) + + return cluster +} + +func TestClusterWithHCPLinkEnabled(t *testing.T, cluster *vault.TestCluster, enableAPICap, enablePassthroughCap bool) (*VaultHCPLinkInstances, *configutil.HCPLinkConfig) { + t.Helper() + clientID, ok := os.LookupEnv("HCP_CLIENT_ID") + if !ok { + t.Skip("HCP client ID not found in env") + } + clientSecret, ok := os.LookupEnv("HCP_CLIENT_SECRET") + if !ok { + t.Skip("HCP client secret not found in env") + } + + if _, ok := 
os.LookupEnv("HCP_API_ADDRESS"); !ok { + t.Skip("failed to find HCP_API_ADDRESS in the environment") + } + if _, ok := os.LookupEnv("HCP_SCADA_ADDRESS"); !ok { + t.Skip("failed to find HCP_SCADA_ADDRESS in the environment") + } + if _, ok := os.LookupEnv("HCP_AUTH_URL"); !ok { + t.Skip("failed to find HCP_AUTH_URL in the environment") + } + + hcpConfig := getHCPConfig(t, clientID, clientSecret) + if enableAPICap { + hcpConfig.EnableAPICapability = true + } + if enablePassthroughCap { + hcpConfig.EnablePassThroughCapability = true + } + hcpLinkIns := NewVaultHCPLinkInstances() + + cluster.Start() + + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + + for _, c := range cluster.Cores { + logger := c.Logger().Named("hcpLink") + vaultHCPLink, err := hcp_link.NewHCPLink(hcpConfig, c.Core, logger) + if err != nil { + t.Fatalf("failed to start HCP link, %v", err) + } + hcpLinkIns.instances = append(hcpLinkIns.instances, vaultHCPLink) + } + + return hcpLinkIns, hcpConfig +} + +func checkLinkStatus(client *api.Client, expectedStatus string, t *testing.T) { + deadline := time.Now().Add(10 * time.Second) + var status *api.SealStatusResponse + var err error + for time.Now().Before(deadline) { + status, err = client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if status.HCPLinkStatus == expectedStatus { + break + } + time.Sleep(500 * time.Millisecond) + } + + if status.HCPLinkStatus != expectedStatus { + t.Fatalf("HCP link did not behave as expected. expected status %v, actual status %v", expectedStatus, status.HCPLinkStatus) + } +} diff --git a/vault/hcp_link/capabilities/api_capability/api_capability.go b/vault/hcp_link/capabilities/api_capability/api_capability.go new file mode 100644 index 000000000..0000c5f20 --- /dev/null +++ b/vault/hcp_link/capabilities/api_capability/api_capability.go @@ -0,0 +1,140 @@ +package api_capability + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + scada "github.com/hashicorp/hcp-scada-provider" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link/capabilities" +) + +type APICapability struct { + l sync.Mutex + logger hclog.Logger + scadaProvider scada.SCADAProvider + scadaServer *http.Server + tokenManager *HCPLinkTokenManager + running bool +} + +var _ capabilities.Capability = &APICapability{} + +func NewAPICapability(scadaConfig *scada.Config, scadaProvider scada.SCADAProvider, core *vault.Core, logger hclog.Logger) (*APICapability, error) { + apiLogger := logger.Named(capabilities.APICapability) + tokenManager, err := NewHCPLinkTokenManager(scadaConfig, core, apiLogger) + if err != nil { + return nil, fmt.Errorf("failed to start HCP Link token manager") + } + + linkHandler := injectBatchTokenHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{Core: core}), tokenManager) + + apiLogger.Trace("initializing HCP Link API capability") + + // server defaults + server := &http.Server{ + Handler: linkHandler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiLogger.StandardLogger(nil), + } + return &APICapability{ + logger: apiLogger, + scadaProvider: scadaProvider, + scadaServer: server, + tokenManager: tokenManager, + }, nil +} + +func (c *APICapability) Start() error { + c.l.Lock() + defer c.l.Unlock() + + if 
c.running { + return nil + } + + // Start listening on a SCADA capability + listener, err := c.scadaProvider.Listen(capabilities.APICapability) + if err != nil { + return fmt.Errorf("failed to start listening on a capability: %w", err) + } + + go func() { + err = c.scadaServer.Serve(listener) + c.logger.Error("server closed", "error", err) + }() + + c.running = true + c.logger.Info("started HCP Link API capability") + + return nil +} + +func (c *APICapability) Stop() error { + c.l.Lock() + defer c.l.Unlock() + + if !c.running { + return nil + } + + var retErr *multierror.Error + + c.logger.Info("Tearing down HCP Link API capability") + + err := c.scadaServer.Shutdown(context.Background()) + if err != nil { + retErr = multierror.Append(err, fmt.Errorf("failed to shutdown scada provider HTTP server %w", err)) + } + c.scadaServer = nil + + c.tokenManager.Shutdown() + c.tokenManager = nil + + c.running = false + + return retErr.ErrorOrNil() +} + +func (c *APICapability) PurgePolicy() { + if c.tokenManager == nil { + return + } + c.tokenManager.ForgetTokenPolicy() + + return +} + +func injectBatchTokenHandler(handler http.Handler, tokenManager *HCPLinkTokenManager) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tokenManager.logger.Debug("received request", "method", r.Method, "path", r.URL.Path) + + // Only the hcp link token should be used + r.Header.Del(consts.AuthHeaderName) + + // for Standby or perfStandby return 412 + standby, perfStandby := tokenManager.wrappedCore.StandbyStates() + + hcpLinkToken := tokenManager.HandleTokenPolicy(r.Context(), !standby && !perfStandby) + + if standby || perfStandby { + logical.RespondError(w, http.StatusPreconditionFailed, fmt.Errorf("API capability is inactive in non-Active nodes")) + return + } + + r.Header.Set(consts.AuthHeaderName, hcpLinkToken) + + handler.ServeHTTP(w, r) + return + }) +} diff --git a/vault/hcp_link/capabilities/api_capability/api_passthrough.go b/vault/hcp_link/capabilities/api_capability/api_passthrough.go new file mode 100644 index 000000000..55c64840f --- /dev/null +++ b/vault/hcp_link/capabilities/api_capability/api_passthrough.go @@ -0,0 +1,103 @@ +package api_capability + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + scada "github.com/hashicorp/hcp-scada-provider" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link/capabilities" + "github.com/hashicorp/vault/vault/hcp_link/internal" +) + +type APIPassThroughCapability struct { + l sync.Mutex + logger hclog.Logger + scadaProvider scada.SCADAProvider + scadaServer *http.Server + running bool +} + +var _ capabilities.Capability = &APIPassThroughCapability{} + +func NewAPIPassThroughCapability(scadaProvider scada.SCADAProvider, core *vault.Core, logger hclog.Logger) (*APIPassThroughCapability, error) { + apiLogger := logger.Named(capabilities.APIPassThroughCapability) + + linkHandler := requestHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{Core: core}), core, apiLogger) + + apiLogger.Trace("initializing HCP Link API PassThrough capability") + + // server defaults + server := &http.Server{ + Handler: linkHandler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiLogger.StandardLogger(nil), + } + return &APIPassThroughCapability{ + logger: apiLogger, + scadaProvider: scadaProvider, + scadaServer: server, + }, nil +} + +func (p 
*APIPassThroughCapability) Start() error { + p.l.Lock() + defer p.l.Unlock() + + if p.running { + return nil + } + + // Start listening on a SCADA capability + listener, err := p.scadaProvider.Listen(capabilities.APIPassThroughCapability) + if err != nil { + return fmt.Errorf("failed to start listening on a capability: %w", err) + } + + go func() { + err = p.scadaServer.Serve(listener) + p.logger.Error("server closed", "error", err) + }() + + p.running = true + p.logger.Info("started HCP Link API PassThrough capability") + + return nil +} + +func (p *APIPassThroughCapability) Stop() error { + p.l.Lock() + defer p.l.Unlock() + + if !p.running { + return nil + } + + p.logger.Info("Tearing down HCP Link API passthrough capability") + + var retErr error + err := p.scadaServer.Shutdown(context.Background()) + if err != nil { + retErr = fmt.Errorf("failed to shutdown scada provider HTTP server %w", err) + } + p.scadaServer = nil + p.running = false + + return retErr +} + +func requestHandler(handler http.Handler, wrappedCore internal.WrappedCoreStandbyStates, logger hclog.Logger) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logger.Debug("received a request in HCP link API passthrough", "method", r.Method, "path", r.URL.Path) + + handler.ServeHTTP(w, r) + return + }) +} diff --git a/vault/hcp_link/capabilities/api_capability/token_manager.go b/vault/hcp_link/capabilities/api_capability/token_manager.go new file mode 100644 index 000000000..312473c36 --- /dev/null +++ b/vault/hcp_link/capabilities/api_capability/token_manager.go @@ -0,0 +1,269 @@ +package api_capability + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + scada "github.com/hashicorp/hcp-scada-provider" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link/internal" +) + +const ( + // HCP policy fetch retry limits + policyRetryWaitMin = 500 * time.Millisecond + policyRetryWaitMax = 30 * time.Second + policyRetryMax = 3 + + // batchTokenDefaultTTL default TTL of a batch token + batchTokenDefaultTTL = 5 * time.Minute + + // tokenExpiryOffset is deducted from token expiry to make sure a new token + // is generated before an old one is expired. + // This is used when Vault is unsealed. 
+ tokenExpiryOffset = 5 * time.Second +) + +type HCPLinkTokenManager struct { + lock sync.RWMutex + wrappedCore internal.WrappedCoreHCPToken + logger hclog.Logger + scadaConfig *scada.Config + policyUrl string + latestToken string + tokenTTL time.Duration + policy string + lastTokenExpiry time.Time +} + +func (t *HCPLinkTokenManager) SetTokenTTL(ttl time.Duration) error { + if ttl < tokenExpiryOffset { + return fmt.Errorf("ttl cannot be less than 5 seconds") + } + + t.lock.Lock() + defer t.lock.Unlock() + t.tokenTTL = ttl + + return nil +} + +func (t *HCPLinkTokenManager) GetLatestToken() string { + t.lock.RLock() + latestToken := t.latestToken + t.lock.RUnlock() + + return latestToken +} + +func (t *HCPLinkTokenManager) GetLastTokenExpiry() time.Time { + t.lock.RLock() + te := t.lastTokenExpiry + t.lock.RUnlock() + + return te +} + +func (t *HCPLinkTokenManager) GetPolicy() string { + t.lock.RLock() + policy := t.policy + t.lock.RUnlock() + + return policy +} + +func (t *HCPLinkTokenManager) fetchPolicy() (string, error) { + req, err := http.NewRequest(http.MethodGet, t.policyUrl, nil) + if err != nil { + return "", fmt.Errorf("error creating HTTP request: %w", err) + } + + query := req.URL.Query() + query.Add("cluster_id", t.scadaConfig.Resource.ID) + req.URL.RawQuery = query.Encode() + + retryableReq, err := retryablehttp.FromRequest(req) + if err != nil { + return "", fmt.Errorf("error adding HTTP request retry wrapping: %w", err) + } + + token, err := t.scadaConfig.HCPConfig.Token() + if err != nil { + return "", fmt.Errorf("unable to retrieve HCP bearer token: %w", err) + } + + retryableReq.Header.Add("Authorization", "Bearer "+token.AccessToken) + + client := &retryablehttp.Client{ + HTTPClient: cleanhttp.DefaultClient(), + RetryWaitMin: policyRetryWaitMin, + RetryWaitMax: policyRetryWaitMax, + RetryMax: policyRetryMax, + CheckRetry: retryablehttp.DefaultRetryPolicy, + Backoff: retryablehttp.DefaultBackoff, + } + + resp, err := client.Do(retryableReq) + if err != nil { + return "", fmt.Errorf("error retrieving policy from HCP: %w", err) + } + + if resp.Body == nil { + return "", fmt.Errorf("invalid HCP policy response: %w", err) + } + + bodyBytes := bytes.NewBuffer(nil) + _, err = bodyBytes.ReadFrom(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading HCP policy response: %w", err) + } + + if err := resp.Body.Close(); err != nil { + return "", fmt.Errorf("error closing response body: %w", err) + } + + body := make(map[string]interface{}) + err = json.Unmarshal(bodyBytes.Bytes(), &body) + if err != nil { + return "", fmt.Errorf("error parsing response body: %w", err) + } + + policy, ok := body["policy"].(string) + if !ok { + return "", fmt.Errorf("formatting for policy fetched from HCP is invalid, expected string received %T", body["policy"]) + } + + return policy, nil +} + +func (t *HCPLinkTokenManager) updateInLinePolicy() { + policy, err := t.fetchPolicy() + if err != nil { + t.logger.Error("failed to fetch policy from HCP", "error", err) + return + } + + t.logger.Info("new policy fetched from HCP") + + t.lock.Lock() + t.policy = policy + t.lock.Unlock() + + return +} + +func NewHCPLinkTokenManager(scadaConfig *scada.Config, core *vault.Core, logger hclog.Logger) (*HCPLinkTokenManager, error) { + tokenLogger := logger.Named("token_manager") + + policyURL := fmt.Sprintf("https://%s/vault/2020-11-25/organizations/%s/projects/%s/link/policy", + scadaConfig.HCPConfig.APIAddress(), + scadaConfig.Resource.Location.OrganizationID, + scadaConfig.Resource.Location.ProjectID, 
+ ) + + m := &HCPLinkTokenManager{ + wrappedCore: core, + logger: tokenLogger, + scadaConfig: scadaConfig, + policyUrl: policyURL, + tokenTTL: batchTokenDefaultTTL, + lastTokenExpiry: time.Time{}, + } + + m.logger.Info("initialized HCP Link token manager") + + return m, nil +} + +func (m *HCPLinkTokenManager) Shutdown() { + m.ForgetTokenPolicy() +} + +// HandleTokenPolicy checks if Vault is sealed or not an active node, +// then it removes both token and policy. And, if Vault is not sealed, +// and token needs to be refreshed it refreshes both policy and token +func (m *HCPLinkTokenManager) HandleTokenPolicy(ctx context.Context, activeNode bool) string { + switch { + case m.wrappedCore.Sealed(), !activeNode: + m.logger.Debug("failed to create a token as Vault is either sealed or a non-active node. Setting the token to an empty string") + m.ForgetTokenPolicy() + case m.GetLatestToken() == "", m.GetLastTokenExpiry().Before(time.Now()): + m.createToken(ctx) + } + + return m.GetLatestToken() +} + +// ForgetTokenPolicy Forgets the current Batch token, its associated policy, +// and sets the lastTokenExpiry to Now such that the policy is forced to be +// refreshed by the next valid request. +func (m *HCPLinkTokenManager) ForgetTokenPolicy() { + m.lock.Lock() + defer m.lock.Unlock() + + // purging the latest token and policy + m.latestToken = "" + m.policy = "" + + // setting the token expiry to now as a cleanup step + m.lastTokenExpiry = time.Now() + m.logger.Info("purged token and inline policy") +} + +func (m *HCPLinkTokenManager) createToken(ctx context.Context) { + // updating the policy first + m.updateInLinePolicy() + policy := m.GetPolicy() + + m.lock.Lock() + defer m.lock.Unlock() + + // an orphan batch token is required. + // For an orphan token, we need to not set the parent + // Also setting the time of creation, and metadata for auditing + te := &logical.TokenEntry{ + Type: logical.TokenTypeBatch, + TTL: m.tokenTTL, + CreationTime: time.Now().Unix(), + NamespaceID: namespace.RootNamespaceID, + NoIdentityPolicies: true, + Meta: map[string]string{ + "hcp_link_token": "HCP Link Access Token", + }, + InlinePolicy: policy, + InternalMeta: map[string]string{vault.IgnoreForBilling: "true"}, + } + // try creating the token, if the trial fails, let's set the latestToken + // to an empty string, and reset token expiry with a backoff strategy + err := m.wrappedCore.CreateToken(ctx, te) + if err != nil { + m.logger.Error("failed to create a token, setting the token to an empty string", "error", err) + m.latestToken = "" + m.lastTokenExpiry = time.Now() + return + } + + if te == nil || len(te.ID) == 0 { + m.logger.Error("token creation returned an empty token entry") + m.latestToken = "" + m.lastTokenExpiry = time.Now() + return + } + + m.latestToken = te.ID + // storing the new token and setting lastTokenExpiry to be 5 seconds before + // the TTL + m.lastTokenExpiry = time.Now().Add(m.tokenTTL - tokenExpiryOffset) + + m.logger.Info("successfully generated a new token for HCP link") +} diff --git a/vault/hcp_link/capabilities/api_capability/token_manager_test.go b/vault/hcp_link/capabilities/api_capability/token_manager_test.go new file mode 100644 index 000000000..0befd58d8 --- /dev/null +++ b/vault/hcp_link/capabilities/api_capability/token_manager_test.go @@ -0,0 +1,315 @@ +package api_capability + +import ( + "context" + "os" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + scada "github.com/hashicorp/hcp-scada-provider" + sdkResource "github.com/hashicorp/hcp-sdk-go/resource" + 
"github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link/internal" +) + +func getHCPConfig(t *testing.T, clientID, clientSecret string) *configutil.HCPLinkConfig { + resourceIDRaw, ok := os.LookupEnv("HCP_RESOURCE_ID") + if !ok { + t.Skip("failed to find the HCP resource ID") + } + res, err := sdkResource.FromString(resourceIDRaw) + if err != nil { + t.Fatalf("failed to parse the resource ID, %v", err.Error()) + } + return &configutil.HCPLinkConfig{ + ResourceIDRaw: resourceIDRaw, + Resource: &res, + ClientID: clientID, + ClientSecret: clientSecret, + } +} + +func getTestScadaConfig(t *testing.T) *scada.Config { + clientID, ok := os.LookupEnv("HCP_CLIENT_ID") + if !ok { + t.Skip("HCP client ID not found in env") + } + clientSecret, ok := os.LookupEnv("HCP_CLIENT_SECRET") + if !ok { + t.Skip("HCP client secret not found in env") + } + + if _, ok := os.LookupEnv("HCP_API_ADDRESS"); !ok { + t.Skip("failed to find HCP_API_ADDRESS in the environment") + } + if _, ok := os.LookupEnv("HCP_SCADA_ADDRESS"); !ok { + t.Skip("failed to find HCP_SCADA_ADDRESS in the environment") + } + if _, ok := os.LookupEnv("HCP_AUTH_URL"); !ok { + t.Skip("failed to find HCP_AUTH_URL in the environment") + } + + hcpConfig := getHCPConfig(t, clientID, clientSecret) + + scadaConfig, err := internal.NewScadaConfig(hcpConfig, hclog.New(nil)) + if err != nil { + t.Fatalf("failed to initialize Scada config") + } + return scadaConfig +} + +func TestCreateTokenCoreSealedUnSealed(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + if tm.latestToken != "" { + t.Fatalf("unexpected latest token") + } + + ctx := context.Background() + tm.createToken(ctx) + + if tm.latestToken != "" { + t.Fatalf("unexpected latest token while core is sealed") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + tm.createToken(ctx) + if tm.latestToken == "" { + t.Fatalf("latestToken should not be empty") + } + latestTokenOld := tm.latestToken + + // running update token again should not change the token + tm.createToken(ctx) + + if tm.latestToken == latestTokenOld { + t.Fatalf("latestToken should have been refreshed") + } +} + +func TestShutdownTokenManagerForgetsTokenPolicy(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + tm.HandleTokenPolicy(context.Background(), true) + if tm.GetLatestToken() == "" { + t.Fatalf("token manager did not update both token and policy") + } + + tm.Shutdown() + + if tm.GetLatestToken() != "" || tm.policy != "" { + t.Fatalf("shutting down TM did not forget both token and policy") + } +} + +func TestSealVaultTokenManagerForgetsTokenPolicy(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + ctx := context.Background() + 
tm.HandleTokenPolicy(ctx, true) + + if tm.GetLatestToken() == "" { + t.Fatalf("token manager did not update both token and policy") + } + + // seal core + err = vault.TestCoreSeal(core) + if err != nil { + t.Fatalf("failed to seal core") + } + + tm.HandleTokenPolicy(ctx, true) + if tm.GetLatestToken() != "" || tm.GetPolicy() != "" { + t.Fatalf("vault is sealed, TM did not forget both token and policy") + } +} + +func TestCreateTokenWithTTL(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + ttl := 7 * time.Second + err = tm.SetTokenTTL(ttl) + if err != nil { + t.Fatalf("failed to set token TTL") + } + + ctx := context.Background() + tm.createToken(ctx) + + latestToken := tm.GetLatestToken() + if latestToken == "" { + t.Fatalf("latestToken should not be empty") + } + + te, err := core.LookupToken(namespace.RootContext(nil), latestToken) + if err != nil { + t.Fatalf("failed to look up token") + } + if te.TTL != ttl { + t.Fatalf("ttl is not as expected") + } + + // sleep until the token is expired + deadline := time.Now().Add(8 * time.Second) + for time.Now().Before(deadline) { + te, err = core.LookupToken(namespace.RootContext(nil), latestToken) + if err == nil && te == nil { + break + } + time.Sleep(500 * time.Millisecond) + } + if err != nil && te != nil { + t.Fatalf("token did not expire as expected") + } +} + +func TestHandleTokenPolicyWithTokenTTL(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + ttl := 7 * time.Second + err = tm.SetTokenTTL(ttl) + if err != nil { + t.Fatalf("failed to set token TTL") + } + + ctx := context.Background() + tm.createToken(ctx) + + latestToken := tm.GetLatestToken() + if latestToken == "" { + t.Fatalf("latestToken should not be empty") + } + + // waiting until TTL - 5 seconds is past + time.Sleep(ttl - tokenExpiryOffset + 1) + + newToken := tm.HandleTokenPolicy(ctx, true) + if newToken == "" || newToken == latestToken { + t.Fatalf("token was not refreshed after expiry") + } +} + +func TestHandleTokenPolicySealedUnsealed(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + tm.latestToken = "non empty" + tm.policy = "non empty" + + ctx := context.Background() + tm.HandleTokenPolicy(ctx, true) + if tm.GetLatestToken() != "" || tm.GetPolicy() != "" { + t.Fatalf("on sealed Vault, token and policy were not deleted") + } + + tm.latestToken = "non empty" + tm.policy = "non empty" + + // unsealing core + vault.TestInitUnsealCore(t, core) + + tm.HandleTokenPolicy(ctx, true) + + if tm.GetLatestToken() == "non empty" { + t.Fatalf("latestToken and policy should have been updated") + } +} + +func TestForgetTokenPolicySealedUnsealed(t *testing.T) { + t.Parallel() + core := vault.TestCore(t) + logger := hclog.New(nil) + + scadaConfig := getTestScadaConfig(t) + + tm, err := NewHCPLinkTokenManager(scadaConfig, core, logger) + 
if err != nil { + t.Fatalf("failed to instantiate token manager") + } + + // unsealing core + vault.TestInitUnsealCore(t, core) + + ctx := context.Background() + tm.HandleTokenPolicy(ctx, true) + + if tm.GetLatestToken() == "" { + t.Fatalf("token and policy were not refreshed on unsealed Vault") + } + + tm.ForgetTokenPolicy() + + if tm.GetLatestToken() != "" || tm.GetPolicy() != "" { + t.Fatalf("ForgetTokenPolicy did not delete token and policy") + } +} diff --git a/vault/hcp_link/capabilities/link_control/link_control.go b/vault/hcp_link/capabilities/link_control/link_control.go new file mode 100644 index 000000000..e46d882f3 --- /dev/null +++ b/vault/hcp_link/capabilities/link_control/link_control.go @@ -0,0 +1,124 @@ +package link_control + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + scada "github.com/hashicorp/hcp-scada-provider" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/hcp_link/capabilities" + "github.com/hashicorp/vault/vault/hcp_link/internal" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/reflection" +) + +type purgePolicyFunc func() + +type hcpLinkControlHandler struct { + UnimplementedHCPLinkControlServer + + purgeFunc purgePolicyFunc + wrappedCore internal.WrappedCoreStandbyStates + scadaProvider scada.SCADAProvider + logger hclog.Logger + + l sync.Mutex + grpcServer *grpc.Server + running bool +} + +func NewHCPLinkControlService(scadaProvider scada.SCADAProvider, core *vault.Core, policyPurger purgePolicyFunc, baseLogger hclog.Logger) *hcpLinkControlHandler { + logger := baseLogger.Named(capabilities.LinkControlCapability) + logger.Trace("initializing HCP Link Control capability") + + grpcServer := grpc.NewServer( + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 2 * time.Second, + }), + grpc.MaxSendMsgSize(math.MaxInt32), + grpc.MaxRecvMsgSize(math.MaxInt32), + ) + + handler := &hcpLinkControlHandler{ + purgeFunc: policyPurger, + logger: logger, + grpcServer: grpcServer, + scadaProvider: scadaProvider, + wrappedCore: core, + } + + RegisterHCPLinkControlServer(grpcServer, handler) + reflection.Register(grpcServer) + + return handler +} + +func (h *hcpLinkControlHandler) Start() error { + h.l.Lock() + defer h.l.Unlock() + + if h.running { + return nil + } + + // Starting link-control service + linkControlListener, err := h.scadaProvider.Listen(capabilities.LinkControlCapability) + if err != nil { + return fmt.Errorf("failed to initialize link-control capability listener: %w", err) + } + + if linkControlListener == nil { + return fmt.Errorf("no listener found for link-control capability") + } + + // Start the gRPC server + go func() { + err = h.grpcServer.Serve(linkControlListener) + h.logger.Error("server closed", "error", err) + }() + + h.running = true + + h.logger.Trace("started HCP Link Control capability") + return nil +} + +func (h *hcpLinkControlHandler) Stop() error { + h.l.Lock() + defer h.l.Unlock() + + if !h.running { + return nil + } + + // Give some time for existing RPCs to drain. 
+ time.Sleep(cluster.ListenerAcceptDeadline) + + h.logger.Info("Tearing down HCP Link Control") + + h.grpcServer.Stop() + + h.running = false + + return nil +} + +func (h *hcpLinkControlHandler) PurgePolicy(ctx context.Context, req *PurgePolicyRequest) (*PurgePolicyResponse, error) { + standby, perfStandby := h.wrappedCore.StandbyStates() + // only purging an active node, perf/standby nodes should purge + // automatically + if standby || perfStandby { + h.logger.Debug("cannot purge the policy on a non-active node") + } else { + h.purgeFunc() + h.logger.Debug("Purged token and policy") + } + + return &PurgePolicyResponse{}, nil +} diff --git a/vault/hcp_link/capabilities/link_control/link_control.pb.go b/vault/hcp_link/capabilities/link_control/link_control.pb.go new file mode 100644 index 000000000..d3cc4659c --- /dev/null +++ b/vault/hcp_link/capabilities/link_control/link_control.pb.go @@ -0,0 +1,199 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.9 +// source: vault/hcp_link/capabilities/link_control/link_control.proto + +package link_control + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PurgePolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PurgePolicyRequest) Reset() { + *x = PurgePolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PurgePolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PurgePolicyRequest) ProtoMessage() {} + +func (x *PurgePolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PurgePolicyRequest.ProtoReflect.Descriptor instead. 
+func (*PurgePolicyRequest) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescGZIP(), []int{0} +} + +type PurgePolicyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PurgePolicyResponse) Reset() { + *x = PurgePolicyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PurgePolicyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PurgePolicyResponse) ProtoMessage() {} + +func (x *PurgePolicyResponse) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PurgePolicyResponse.ProtoReflect.Descriptor instead. +func (*PurgePolicyResponse) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescGZIP(), []int{1} +} + +var File_vault_hcp_link_capabilities_link_control_link_control_proto protoreflect.FileDescriptor + +var file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, + 0x2f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x69, + 0x6e, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6c, + 0x69, 0x6e, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x14, 0x0a, 0x12, 0x50, + 0x75, 0x72, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x75, 0x72, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x64, 0x0a, 0x0e, 0x48, 0x43, 0x50, 0x4c, + 0x69, 0x6e, 0x6b, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, + 0x72, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x20, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6c, 0x69, + 0x6e, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x45, + 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x63, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescOnce sync.Once + file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescData = 
file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDesc +) + +func file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescGZIP() []byte { + file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescOnce.Do(func() { + file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescData = protoimpl.X.CompressGZIP(file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescData) + }) + return file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDescData +} + +var file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_vault_hcp_link_capabilities_link_control_link_control_proto_goTypes = []interface{}{ + (*PurgePolicyRequest)(nil), // 0: link_control.PurgePolicyRequest + (*PurgePolicyResponse)(nil), // 1: link_control.PurgePolicyResponse +} +var file_vault_hcp_link_capabilities_link_control_link_control_proto_depIdxs = []int32{ + 0, // 0: link_control.HCPLinkControl.PurgePolicy:input_type -> link_control.PurgePolicyRequest + 1, // 1: link_control.HCPLinkControl.PurgePolicy:output_type -> link_control.PurgePolicyResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_vault_hcp_link_capabilities_link_control_link_control_proto_init() } +func file_vault_hcp_link_capabilities_link_control_link_control_proto_init() { + if File_vault_hcp_link_capabilities_link_control_link_control_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PurgePolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PurgePolicyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_vault_hcp_link_capabilities_link_control_link_control_proto_goTypes, + DependencyIndexes: file_vault_hcp_link_capabilities_link_control_link_control_proto_depIdxs, + MessageInfos: file_vault_hcp_link_capabilities_link_control_link_control_proto_msgTypes, + }.Build() + File_vault_hcp_link_capabilities_link_control_link_control_proto = out.File + file_vault_hcp_link_capabilities_link_control_link_control_proto_rawDesc = nil + file_vault_hcp_link_capabilities_link_control_link_control_proto_goTypes = nil + file_vault_hcp_link_capabilities_link_control_link_control_proto_depIdxs = nil +} diff --git a/vault/hcp_link/capabilities/link_control/link_control.proto b/vault/hcp_link/capabilities/link_control/link_control.proto new file mode 100644 index 000000000..d357f46f4 --- /dev/null +++ b/vault/hcp_link/capabilities/link_control/link_control.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + 
+option go_package = "github.com/hashicorp/vault/vault/hcp_link/capabilities/link_control"; + +package link_control; + +message PurgePolicyRequest {} + +message PurgePolicyResponse {} + +service HCPLinkControl { + // PurgePolicy Forgets the current Batch token, and its associated policy, + // such that the policy is forced to be refreshed. + rpc PurgePolicy(PurgePolicyRequest) returns (PurgePolicyResponse); +} \ No newline at end of file diff --git a/vault/hcp_link/capabilities/link_control/link_control_grpc.pb.go b/vault/hcp_link/capabilities/link_control/link_control_grpc.pb.go new file mode 100644 index 000000000..ac29e1c8a --- /dev/null +++ b/vault/hcp_link/capabilities/link_control/link_control_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package link_control + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// HCPLinkControlClient is the client API for HCPLinkControl service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HCPLinkControlClient interface { + // PurgePolicy Forgets the current Batch token, and its associated policy, + // such that the policy is forced to be refreshed. + PurgePolicy(ctx context.Context, in *PurgePolicyRequest, opts ...grpc.CallOption) (*PurgePolicyResponse, error) +} + +type hCPLinkControlClient struct { + cc grpc.ClientConnInterface +} + +func NewHCPLinkControlClient(cc grpc.ClientConnInterface) HCPLinkControlClient { + return &hCPLinkControlClient{cc} +} + +func (c *hCPLinkControlClient) PurgePolicy(ctx context.Context, in *PurgePolicyRequest, opts ...grpc.CallOption) (*PurgePolicyResponse, error) { + out := new(PurgePolicyResponse) + err := c.cc.Invoke(ctx, "/link_control.HCPLinkControl/PurgePolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// HCPLinkControlServer is the server API for HCPLinkControl service. +// All implementations must embed UnimplementedHCPLinkControlServer +// for forward compatibility +type HCPLinkControlServer interface { + // PurgePolicy Forgets the current Batch token, and its associated policy, + // such that the policy is forced to be refreshed. + PurgePolicy(context.Context, *PurgePolicyRequest) (*PurgePolicyResponse, error) + mustEmbedUnimplementedHCPLinkControlServer() +} + +// UnimplementedHCPLinkControlServer must be embedded to have forward compatible implementations. +type UnimplementedHCPLinkControlServer struct { +} + +func (UnimplementedHCPLinkControlServer) PurgePolicy(context.Context, *PurgePolicyRequest) (*PurgePolicyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PurgePolicy not implemented") +} +func (UnimplementedHCPLinkControlServer) mustEmbedUnimplementedHCPLinkControlServer() {} + +// UnsafeHCPLinkControlServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HCPLinkControlServer will +// result in compilation errors. 
+type UnsafeHCPLinkControlServer interface { + mustEmbedUnimplementedHCPLinkControlServer() +} + +func RegisterHCPLinkControlServer(s grpc.ServiceRegistrar, srv HCPLinkControlServer) { + s.RegisterService(&HCPLinkControl_ServiceDesc, srv) +} + +func _HCPLinkControl_PurgePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PurgePolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HCPLinkControlServer).PurgePolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/link_control.HCPLinkControl/PurgePolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HCPLinkControlServer).PurgePolicy(ctx, req.(*PurgePolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// HCPLinkControl_ServiceDesc is the grpc.ServiceDesc for HCPLinkControl service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HCPLinkControl_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "link_control.HCPLinkControl", + HandlerType: (*HCPLinkControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PurgePolicy", + Handler: _HCPLinkControl_PurgePolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "vault/hcp_link/capabilities/link_control/link_control.proto", +} diff --git a/vault/hcp_link/capabilities/meta/meta.go b/vault/hcp_link/capabilities/meta/meta.go new file mode 100644 index 000000000..e930a46e7 --- /dev/null +++ b/vault/hcp_link/capabilities/meta/meta.go @@ -0,0 +1,190 @@ +package meta + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + scada "github.com/hashicorp/hcp-scada-provider" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/hcp_link/capabilities" + "github.com/hashicorp/vault/vault/hcp_link/internal" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/reflection" +) + +type hcpLinkMetaHandler struct { + UnimplementedHCPLinkMetaServer + + wrappedCore internal.WrappedCoreListNamespacesMounts + scadaProvider scada.SCADAProvider + logger hclog.Logger + + l sync.Mutex + grpcServer *grpc.Server + stopCh chan struct{} + running bool +} + +func NewHCPLinkMetaService(scadaProvider scada.SCADAProvider, c *vault.Core, baseLogger hclog.Logger) *hcpLinkMetaHandler { + logger := baseLogger.Named(capabilities.MetaCapability) + logger.Info("Setting up HCP Link Meta Service") + + grpcServer := grpc.NewServer( + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 2 * time.Second, + }), + grpc.MaxSendMsgSize(math.MaxInt32), + grpc.MaxRecvMsgSize(math.MaxInt32), + ) + + handler := &hcpLinkMetaHandler{ + wrappedCore: c, + logger: logger, + grpcServer: grpcServer, + scadaProvider: scadaProvider, + } + + RegisterHCPLinkMetaServer(grpcServer, handler) + reflection.Register(grpcServer) + + return handler +} + +func (h *hcpLinkMetaHandler) Start() error { + h.l.Lock() + defer h.l.Unlock() + + if h.running { + return nil + } + + // Starting meta service + metaListener, err := h.scadaProvider.Listen(capabilities.MetaCapability) + if err != nil { + return fmt.Errorf("failed to initialize meta capability listener: %w", err) + } + + if metaListener == nil { + return fmt.Errorf("no listener 
found for meta capability") + } + + h.logger.Info("starting HCP Link Meta Service") + // Start the gRPC server + go func() { + err = h.grpcServer.Serve(metaListener) + h.logger.Error("server closed", "error", err) + }() + + h.running = true + + return nil +} + +func (h *hcpLinkMetaHandler) Stop() error { + h.l.Lock() + defer h.l.Unlock() + + if !h.running { + return nil + } + + // Give some time for existing RPCs to drain. + time.Sleep(cluster.ListenerAcceptDeadline) + + h.logger.Info("Tearing down HCP Link Meta Service") + + if h.stopCh != nil { + close(h.stopCh) + h.stopCh = nil + } + + h.grpcServer.Stop() + + h.running = false + + return nil +} + +func (h *hcpLinkMetaHandler) ListNamespaces(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) { + children := h.wrappedCore.ListNamespaces(true) + + var namespaces []string + for _, child := range children { + namespaces = append(namespaces, child.Path) + } + + return &ListNamespacesResponse{ + Paths: namespaces, + }, nil +} + +func (h *hcpLinkMetaHandler) ListMounts(ctx context.Context, req *ListMountsRequest) (*ListMountsResponse, error) { + mountEntries, err := h.wrappedCore.ListMounts() + if err != nil { + return nil, err + } + + var mounts []*Mount + for _, entry := range mountEntries { + nsID := entry.NamespaceID + path := entry.Path + + if nsID != namespace.RootNamespaceID { + ns, err := h.wrappedCore.NamespaceByID(ctx, entry.NamespaceID) + if err != nil { + return nil, err + } + + path = ns.Path + path + } + + mounts = append(mounts, &Mount{ + Path: path, + Type: entry.Type, + Description: entry.Description, + }) + } + + return &ListMountsResponse{ + Mounts: mounts, + }, nil +} + +func (h *hcpLinkMetaHandler) ListAuths(ctx context.Context, req *ListAuthsRequest) (*ListAuthResponse, error) { + authEntries, err := h.wrappedCore.ListAuths() + if err != nil { + return nil, err + } + + var auths []*Auth + for _, entry := range authEntries { + nsID := entry.NamespaceID + path := entry.Path + + if nsID != namespace.RootNamespaceID { + ns, err := h.wrappedCore.NamespaceByID(ctx, entry.NamespaceID) + if err != nil { + return nil, err + } + + path = ns.Path + path + } + + auths = append(auths, &Auth{ + Path: path, + Type: entry.Type, + Description: entry.Description, + }) + } + + return &ListAuthResponse{ + Auths: auths, + }, nil +} diff --git a/vault/hcp_link/capabilities/meta/meta.pb.go b/vault/hcp_link/capabilities/meta/meta.pb.go new file mode 100644 index 000000000..790043daf --- /dev/null +++ b/vault/hcp_link/capabilities/meta/meta.pb.go @@ -0,0 +1,615 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.9 +// source: vault/hcp_link/capabilities/meta/meta.proto + +package meta + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListNamespacesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListNamespacesRequest) Reset() { + *x = ListNamespacesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNamespacesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNamespacesRequest) ProtoMessage() {} + +func (x *ListNamespacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNamespacesRequest.ProtoReflect.Descriptor instead. +func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{0} +} + +type ListNamespacesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Paths []string `protobuf:"bytes,1,rep,name=Paths,proto3" json:"Paths,omitempty"` +} + +func (x *ListNamespacesResponse) Reset() { + *x = ListNamespacesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNamespacesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNamespacesResponse) ProtoMessage() {} + +func (x *ListNamespacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNamespacesResponse.ProtoReflect.Descriptor instead. +func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{1} +} + +func (x *ListNamespacesResponse) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +type ListMountsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListMountsRequest) Reset() { + *x = ListMountsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMountsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMountsRequest) ProtoMessage() {} + +func (x *ListMountsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMountsRequest.ProtoReflect.Descriptor instead. 
+func (*ListMountsRequest) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{2} +} + +type Mount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=Path,proto3" json:"Path,omitempty"` + Type string `protobuf:"bytes,2,opt,name=Type,proto3" json:"Type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=Description,proto3" json:"Description,omitempty"` +} + +func (x *Mount) Reset() { + *x = Mount{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mount) ProtoMessage() {} + +func (x *Mount) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mount.ProtoReflect.Descriptor instead. +func (*Mount) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{3} +} + +func (x *Mount) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Mount) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Mount) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type ListMountsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*Mount `protobuf:"bytes,1,rep,name=Mounts,proto3" json:"Mounts,omitempty"` +} + +func (x *ListMountsResponse) Reset() { + *x = ListMountsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMountsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMountsResponse) ProtoMessage() {} + +func (x *ListMountsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMountsResponse.ProtoReflect.Descriptor instead. 
+func (*ListMountsResponse) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{4} +} + +func (x *ListMountsResponse) GetMounts() []*Mount { + if x != nil { + return x.Mounts + } + return nil +} + +type ListAuthsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListAuthsRequest) Reset() { + *x = ListAuthsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAuthsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAuthsRequest) ProtoMessage() {} + +func (x *ListAuthsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAuthsRequest.ProtoReflect.Descriptor instead. +func (*ListAuthsRequest) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{5} +} + +type Auth struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=Path,proto3" json:"Path,omitempty"` + Type string `protobuf:"bytes,2,opt,name=Type,proto3" json:"Type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=Description,proto3" json:"Description,omitempty"` +} + +func (x *Auth) Reset() { + *x = Auth{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Auth) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Auth) ProtoMessage() {} + +func (x *Auth) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Auth.ProtoReflect.Descriptor instead. 
+func (*Auth) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{6} +} + +func (x *Auth) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Auth) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Auth) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type ListAuthResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Auths []*Auth `protobuf:"bytes,1,rep,name=Auths,proto3" json:"Auths,omitempty"` +} + +func (x *ListAuthResponse) Reset() { + *x = ListAuthResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAuthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAuthResponse) ProtoMessage() {} + +func (x *ListAuthResponse) ProtoReflect() protoreflect.Message { + mi := &file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAuthResponse.ProtoReflect.Descriptor instead. +func (*ListAuthResponse) Descriptor() ([]byte, []int) { + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP(), []int{7} +} + +func (x *ListAuthResponse) GetAuths() []*Auth { + if x != nil { + return x.Auths + } + return nil +} + +var File_vault_hcp_link_capabilities_meta_meta_proto protoreflect.FileDescriptor + +var file_vault_hcp_link_capabilities_meta_meta_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, + 0x2f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2f, 0x6d, 0x65, + 0x74, 0x61, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x6d, + 0x65, 0x74, 0x61, 0x22, 0x17, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x22, 0x13, 0x0a, 0x11, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x51, 0x0a, 0x05, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x39, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x06, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 
0x74, 0x52, 0x06, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, + 0x12, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x50, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x50, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, + 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x41, 0x75, 0x74, + 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, + 0x41, 0x75, 0x74, 0x68, 0x52, 0x05, 0x41, 0x75, 0x74, 0x68, 0x73, 0x32, 0xd8, 0x01, 0x0a, 0x0b, + 0x48, 0x43, 0x50, 0x4c, 0x69, 0x6e, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x0e, 0x4c, + 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1b, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, + 0x74, 0x41, 0x75, 0x74, 0x68, 0x73, 0x12, 0x16, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, + 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x6c, + 0x69, 0x6e, 0x6b, 0x2f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_vault_hcp_link_capabilities_meta_meta_proto_rawDescOnce sync.Once + file_vault_hcp_link_capabilities_meta_meta_proto_rawDescData = file_vault_hcp_link_capabilities_meta_meta_proto_rawDesc +) + +func file_vault_hcp_link_capabilities_meta_meta_proto_rawDescGZIP() []byte { + file_vault_hcp_link_capabilities_meta_meta_proto_rawDescOnce.Do(func() { + file_vault_hcp_link_capabilities_meta_meta_proto_rawDescData = protoimpl.X.CompressGZIP(file_vault_hcp_link_capabilities_meta_meta_proto_rawDescData) + }) + return file_vault_hcp_link_capabilities_meta_meta_proto_rawDescData +} + +var file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_vault_hcp_link_capabilities_meta_meta_proto_goTypes = 
[]interface{}{ + (*ListNamespacesRequest)(nil), // 0: meta.ListNamespacesRequest + (*ListNamespacesResponse)(nil), // 1: meta.ListNamespacesResponse + (*ListMountsRequest)(nil), // 2: meta.ListMountsRequest + (*Mount)(nil), // 3: meta.Mount + (*ListMountsResponse)(nil), // 4: meta.ListMountsResponse + (*ListAuthsRequest)(nil), // 5: meta.ListAuthsRequest + (*Auth)(nil), // 6: meta.Auth + (*ListAuthResponse)(nil), // 7: meta.ListAuthResponse +} +var file_vault_hcp_link_capabilities_meta_meta_proto_depIdxs = []int32{ + 3, // 0: meta.ListMountsResponse.Mounts:type_name -> meta.Mount + 6, // 1: meta.ListAuthResponse.Auths:type_name -> meta.Auth + 0, // 2: meta.HCPLinkMeta.ListNamespaces:input_type -> meta.ListNamespacesRequest + 2, // 3: meta.HCPLinkMeta.ListMounts:input_type -> meta.ListMountsRequest + 5, // 4: meta.HCPLinkMeta.ListAuths:input_type -> meta.ListAuthsRequest + 1, // 5: meta.HCPLinkMeta.ListNamespaces:output_type -> meta.ListNamespacesResponse + 4, // 6: meta.HCPLinkMeta.ListMounts:output_type -> meta.ListMountsResponse + 7, // 7: meta.HCPLinkMeta.ListAuths:output_type -> meta.ListAuthResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_vault_hcp_link_capabilities_meta_meta_proto_init() } +func file_vault_hcp_link_capabilities_meta_meta_proto_init() { + if File_vault_hcp_link_capabilities_meta_meta_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMountsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMountsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListAuthsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Auth); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: 
+ return &v.unknownFields + default: + return nil + } + } + file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListAuthResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_vault_hcp_link_capabilities_meta_meta_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_vault_hcp_link_capabilities_meta_meta_proto_goTypes, + DependencyIndexes: file_vault_hcp_link_capabilities_meta_meta_proto_depIdxs, + MessageInfos: file_vault_hcp_link_capabilities_meta_meta_proto_msgTypes, + }.Build() + File_vault_hcp_link_capabilities_meta_meta_proto = out.File + file_vault_hcp_link_capabilities_meta_meta_proto_rawDesc = nil + file_vault_hcp_link_capabilities_meta_meta_proto_goTypes = nil + file_vault_hcp_link_capabilities_meta_meta_proto_depIdxs = nil +} diff --git a/vault/hcp_link/capabilities/meta/meta.proto b/vault/hcp_link/capabilities/meta/meta.proto new file mode 100644 index 000000000..b4c4cf617 --- /dev/null +++ b/vault/hcp_link/capabilities/meta/meta.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/vault/hcp_link/capabilities/meta"; + +package meta; + +message ListNamespacesRequest {} + +message ListNamespacesResponse { + repeated string Paths = 1; +} + +message ListMountsRequest {} + +message Mount { + string Path = 1; + string Type = 2; + string Description = 3; +} + +message ListMountsResponse { + repeated Mount Mounts = 1; +} + +message ListAuthsRequest {} + +message Auth { + string Path = 1; + string Type = 2; + string Description = 3; +} + +message ListAuthResponse { + repeated Auth Auths = 1; +} + +service HCPLinkMeta { + // ListNamespaces will be used to recursively list all namespaces + rpc ListNamespaces(ListNamespacesRequest) returns (ListNamespacesResponse); + + // ListMounts will be used to recursively list all mounts in all namespaces + rpc ListMounts(ListMountsRequest) returns (ListMountsResponse); + + // ListAuths will be used to recursively list all auths in all namespaces + rpc ListAuths(ListAuthsRequest) returns (ListAuthResponse); +} \ No newline at end of file diff --git a/vault/hcp_link/capabilities/meta/meta_grpc.pb.go b/vault/hcp_link/capabilities/meta/meta_grpc.pb.go new file mode 100644 index 000000000..c6bf403c3 --- /dev/null +++ b/vault/hcp_link/capabilities/meta/meta_grpc.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package meta + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// HCPLinkMetaClient is the client API for HCPLinkMeta service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type HCPLinkMetaClient interface { + // ListNamespaces will be used to recursively list all namespaces + ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) + // ListMounts will be used to recursively list all mounts in all namespaces + ListMounts(ctx context.Context, in *ListMountsRequest, opts ...grpc.CallOption) (*ListMountsResponse, error) + // ListAuths will be used to recursively list all auths in all namespaces + ListAuths(ctx context.Context, in *ListAuthsRequest, opts ...grpc.CallOption) (*ListAuthResponse, error) +} + +type hCPLinkMetaClient struct { + cc grpc.ClientConnInterface +} + +func NewHCPLinkMetaClient(cc grpc.ClientConnInterface) HCPLinkMetaClient { + return &hCPLinkMetaClient{cc} +} + +func (c *hCPLinkMetaClient) ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) { + out := new(ListNamespacesResponse) + err := c.cc.Invoke(ctx, "/meta.HCPLinkMeta/ListNamespaces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hCPLinkMetaClient) ListMounts(ctx context.Context, in *ListMountsRequest, opts ...grpc.CallOption) (*ListMountsResponse, error) { + out := new(ListMountsResponse) + err := c.cc.Invoke(ctx, "/meta.HCPLinkMeta/ListMounts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hCPLinkMetaClient) ListAuths(ctx context.Context, in *ListAuthsRequest, opts ...grpc.CallOption) (*ListAuthResponse, error) { + out := new(ListAuthResponse) + err := c.cc.Invoke(ctx, "/meta.HCPLinkMeta/ListAuths", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// HCPLinkMetaServer is the server API for HCPLinkMeta service. +// All implementations must embed UnimplementedHCPLinkMetaServer +// for forward compatibility +type HCPLinkMetaServer interface { + // ListNamespaces will be used to recursively list all namespaces + ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) + // ListMounts will be used to recursively list all mounts in all namespaces + ListMounts(context.Context, *ListMountsRequest) (*ListMountsResponse, error) + // ListAuths will be used to recursively list all auths in all namespaces + ListAuths(context.Context, *ListAuthsRequest) (*ListAuthResponse, error) + mustEmbedUnimplementedHCPLinkMetaServer() +} + +// UnimplementedHCPLinkMetaServer must be embedded to have forward compatible implementations. +type UnimplementedHCPLinkMetaServer struct { +} + +func (UnimplementedHCPLinkMetaServer) ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNamespaces not implemented") +} +func (UnimplementedHCPLinkMetaServer) ListMounts(context.Context, *ListMountsRequest) (*ListMountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMounts not implemented") +} +func (UnimplementedHCPLinkMetaServer) ListAuths(context.Context, *ListAuthsRequest) (*ListAuthResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAuths not implemented") +} +func (UnimplementedHCPLinkMetaServer) mustEmbedUnimplementedHCPLinkMetaServer() {} + +// UnsafeHCPLinkMetaServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HCPLinkMetaServer will +// result in compilation errors. 
+type UnsafeHCPLinkMetaServer interface { + mustEmbedUnimplementedHCPLinkMetaServer() +} + +func RegisterHCPLinkMetaServer(s grpc.ServiceRegistrar, srv HCPLinkMetaServer) { + s.RegisterService(&HCPLinkMeta_ServiceDesc, srv) +} + +func _HCPLinkMeta_ListNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNamespacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HCPLinkMetaServer).ListNamespaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/meta.HCPLinkMeta/ListNamespaces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HCPLinkMetaServer).ListNamespaces(ctx, req.(*ListNamespacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HCPLinkMeta_ListMounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HCPLinkMetaServer).ListMounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/meta.HCPLinkMeta/ListMounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HCPLinkMetaServer).ListMounts(ctx, req.(*ListMountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HCPLinkMeta_ListAuths_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAuthsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HCPLinkMetaServer).ListAuths(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/meta.HCPLinkMeta/ListAuths", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HCPLinkMetaServer).ListAuths(ctx, req.(*ListAuthsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// HCPLinkMeta_ServiceDesc is the grpc.ServiceDesc for HCPLinkMeta service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HCPLinkMeta_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "meta.HCPLinkMeta", + HandlerType: (*HCPLinkMetaServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNamespaces", + Handler: _HCPLinkMeta_ListNamespaces_Handler, + }, + { + MethodName: "ListMounts", + Handler: _HCPLinkMeta_ListMounts_Handler, + }, + { + MethodName: "ListAuths", + Handler: _HCPLinkMeta_ListAuths_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "vault/hcp_link/capabilities/meta/meta.proto", +} diff --git a/vault/hcp_link/capabilities/node_status/node_status.go b/vault/hcp_link/capabilities/node_status/node_status.go new file mode 100644 index 000000000..31ea3725c --- /dev/null +++ b/vault/hcp_link/capabilities/node_status/node_status.go @@ -0,0 +1,54 @@ +package node_status + +import ( + "context" + + "github.com/hashicorp/hcp-link/pkg/nodestatus" + "github.com/hashicorp/vault/vault/hcp_link/internal" + "github.com/hashicorp/vault/vault/hcp_link/proto/node_status" +) + +var ( + _ nodestatus.Reporter = &NodeStatusReporter{} + Version = 1 +) + +type NodeStatusReporter struct { + NodeStatusGetter internal.WrappedCoreNodeStatus +} + +func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (nodestatus.NodeStatus, error) { + var status nodestatus.NodeStatus + + sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx) + if err != nil { + return status, err + } + + replState := c.NodeStatusGetter.ReplicationState() + + protoRes := &node_status.LinkedClusterNodeStatusResponse{ + Type: sealStatus.Type, + Initialized: sealStatus.Initialized, + Sealed: sealStatus.Sealed, + T: int64(sealStatus.T), + N: int64(sealStatus.N), + Progress: int64(sealStatus.Progress), + Nonce: sealStatus.Nonce, + Version: sealStatus.Version, + BuildDate: sealStatus.BuildDate, + Migration: sealStatus.Migration, + ClusterID: sealStatus.ClusterID, + ClusterName: sealStatus.ClusterName, + RecoverySeal: sealStatus.RecoverySeal, + StorageType: sealStatus.StorageType, + ReplicationState: replState.StateStrings(), + } + + ns := nodestatus.NodeStatus{ + StatusVersion: uint32(Version), + Status: protoRes, + } + + return ns, nil +} diff --git a/vault/hcp_link/internal/config.go b/vault/hcp_link/internal/config.go new file mode 100644 index 000000000..c4e0c66e0 --- /dev/null +++ b/vault/hcp_link/internal/config.go @@ -0,0 +1,65 @@ +package internal + +import ( + "fmt" + + "github.com/hashicorp/go-hclog" + linkConfig "github.com/hashicorp/hcp-link/pkg/config" + "github.com/hashicorp/hcp-link/pkg/nodestatus" + scada "github.com/hashicorp/hcp-scada-provider" + cloud "github.com/hashicorp/hcp-sdk-go/clients/cloud-shared/v1/models" + sdkConfig "github.com/hashicorp/hcp-sdk-go/config" + "github.com/hashicorp/vault/internalshared/configutil" +) + +const ServiceName = "vault-link" + +func NewScadaConfig(linkConf *configutil.HCPLinkConfig, logger hclog.Logger) (*scada.Config, error) { + // getting models.HashicorpCloudLocationLink to be passed in the + // scada.config + res := linkConf.Resource.Link() + + // creating a base from the env allows for overriding the following for dev purposes: + // - auth URL: HCP_AUTH_URL + // - SCADA address: HCP_SCADA_ADDRESS + // - API address: HCP_API_ADDRESS + opts := []sdkConfig.HCPConfigOption{sdkConfig.FromEnv()} + + // client ID and client secret from config takes precedence despite + // sdkConfig.FromEnv allowing to set from env + opts = append(opts, 
sdkConfig.WithClientCredentials(linkConf.ClientID, linkConf.ClientSecret)) + + hcpConfig, err := sdkConfig.NewHCPConfig(opts...) + if err != nil { + return nil, fmt.Errorf("failed to create HCP config: %w", err) + } + + // Compile SCADA config + scadaConfig := &scada.Config{ + Service: ServiceName, + HCPConfig: hcpConfig, + Resource: *res, + Logger: logger, + } + return scadaConfig, nil +} + +// NewLinkConfig validates the provided values and constructs an instance of a Config. +func NewLinkConfig(nodeID string, nodeVersion string, resource cloud.HashicorpCloudLocationLink, scadaProvider scada.SCADAProvider, hcpConfig sdkConfig.HCPConfig, nodeStatusReporter nodestatus.Reporter, logger hclog.Logger) (*linkConfig.Config, error) { + config := &linkConfig.Config{ + NodeID: nodeID, + NodeVersion: nodeVersion, + HCPConfig: hcpConfig, + Resource: resource, + NodeStatusReporter: nodeStatusReporter, + SCADAProvider: scadaProvider, + Logger: logger, + } + + err := config.Validate() + if err != nil { + return nil, fmt.Errorf("failed to create the link config: %w", err) + } + + return config, nil +} diff --git a/vault/hcp_link/link.go b/vault/hcp_link/link.go new file mode 100644 index 000000000..9192603e9 --- /dev/null +++ b/vault/hcp_link/link.go @@ -0,0 +1,372 @@ +package hcp_link + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + link "github.com/hashicorp/hcp-link" + linkConfig "github.com/hashicorp/hcp-link/pkg/config" + scada "github.com/hashicorp/hcp-scada-provider" + "github.com/hashicorp/vault/internalshared/configutil" + vaultVersion "github.com/hashicorp/vault/sdk/version" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link/capabilities" + "github.com/hashicorp/vault/vault/hcp_link/capabilities/api_capability" + "github.com/hashicorp/vault/vault/hcp_link/capabilities/link_control" + "github.com/hashicorp/vault/vault/hcp_link/capabilities/meta" + "github.com/hashicorp/vault/vault/hcp_link/capabilities/node_status" + "github.com/hashicorp/vault/vault/hcp_link/internal" +) + +const ( + SetLinkStatusCadence = 5 * time.Second + + // metaDataNodeStatus is used to set the Scada provider metadata status + // to indicate if Vault is in active or standby status + metaDataNodeStatus = "link.node_status" + + standbyStatus = "STANDBY" + activeStatus = "ACTIVE" + perfStandbyStatus = "PERF-STANDBY" +) + +var ( + // genericScadaConnectionError is used when Vault fails to fetch + // last connection error from Scada Provider + genericScadaConnectionError = errors.New("unable to establish a connection with HCP") + invalidClientCredentials = errors.New("failed to get access token: oauth2: cannot fetch token: 401 Unauthorized") +) + +type HCPLinkVault struct { + l sync.Mutex + LinkStatus internal.WrappedCoreHCPLinkStatus + scadaConfig *scada.Config + linkConfig *linkConfig.Config + link link.Link + logger hclog.Logger + capabilities map[string]capabilities.Capability + stopCh chan struct{} + running bool +} + +func NewHCPLink(linkConf *configutil.HCPLinkConfig, core *vault.Core, logger hclog.Logger) (*HCPLinkVault, error) { + if linkConf == nil { + return nil, nil + } + + scadaLogger := logger.Named("scada") + scadaConfig, err := internal.NewScadaConfig(linkConf, scadaLogger) + if err != nil { + return nil, fmt.Errorf("failed to instantiate SCADA config, %w", err) + } + + // setting the link status in core, as link config is not nil + // At this point scada provider has not been started yet + // 
After starting scada provider, we need to use + // scadaProvider.SessionStatus() to get the status of the connection + core.SetHCPLinkStatus( + buildConnectionErrorMessage(scada.SessionStatusDisconnected, scada.ErrProviderNotStarted.Error(), time.Now()), + scadaConfig.Resource.Location.ProjectID, + ) + + // Creating SCADA provider + scadaProvider, err := scada.New(scadaConfig) + if err != nil { + return nil, fmt.Errorf("failed to instantiate SCADA provider: %w", err) + } + + resource := scadaConfig.Resource + hcpConfig := scadaConfig.HCPConfig + version := vaultVersion.Version + + // initializing node status reporter. This capability is configured by link lib. + statusReporter := &node_status.NodeStatusReporter{ + NodeStatusGetter: core, + } + nodeID, err := core.LoadNodeID() + if err != nil { + return nil, fmt.Errorf("failed to get nodeID, %w", err) + } + + // Compile the Link config + var conf *linkConfig.Config + conf, err = internal.NewLinkConfig( + nodeID, + version, + resource, + scadaProvider, + hcpConfig, + statusReporter, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to instantiate Link library config: %w", err) + } + + // Create a Link library instance + hcpLink, err := link.New(conf) + if err != nil { + return nil, fmt.Errorf("failed to instantiate Link library: %w", err) + } + + hcpLinkCaps, err := initializeCapabilities(linkConf, scadaConfig, scadaProvider, core, logger) + if err != nil { + return nil, fmt.Errorf("failed to initialize capabilities: %w", err) + } + + hcpLinkVault := &HCPLinkVault{ + LinkStatus: core, + scadaConfig: scadaConfig, + linkConfig: conf, + link: hcpLink, + capabilities: hcpLinkCaps, + stopCh: make(chan struct{}), + logger: logger, + } + + // Start hcpLink and ScadaProvider + err = hcpLinkVault.start() + if err != nil { + return nil, fmt.Errorf("failed to start hcp link, %w", err) + } + + return hcpLinkVault, nil +} + +func initializeCapabilities(linkConf *configutil.HCPLinkConfig, scadaConfig *scada.Config, scadaProvider scada.SCADAProvider, core *vault.Core, logger hclog.Logger) (map[string]capabilities.Capability, error) { + hcpLinkCaps := make(map[string]capabilities.Capability, 0) + + metaCap := meta.NewHCPLinkMetaService(scadaProvider, core, logger) + hcpLinkCaps[capabilities.MetaCapability] = metaCap + + // Initializing API and link-control capabilities + var retErr *multierror.Error + if linkConf.EnableAPICapability { + apiCap, err := api_capability.NewAPICapability(scadaConfig, scadaProvider, core, logger) + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("failed to instantiate API capability, %w", err)) + } + hcpLinkCaps[capabilities.APICapability] = apiCap + + // link control capability is tied to api capability + linkControlCap := link_control.NewHCPLinkControlService(scadaProvider, core, apiCap.PurgePolicy, logger) + hcpLinkCaps[capabilities.LinkControlCapability] = linkControlCap + + } + + // Initializing Passthrough capability + if linkConf.EnablePassThroughCapability { + apiPassCap, err := api_capability.NewAPIPassThroughCapability(scadaProvider, core, logger) + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("failed to instantiate PassThrough capability, %w", err)) + } + hcpLinkCaps[capabilities.APIPassThroughCapability] = apiPassCap + } + + return hcpLinkCaps, retErr.ErrorOrNil() +} + +// Start the connection regardless if the node is in seal mode or not +func (h *HCPLinkVault) start() error { + h.l.Lock() + defer h.l.Unlock() + + if h.running { + return nil + } + + if h.linkConfig == 
nil { + return fmt.Errorf("hcpLink config has not been provided") + } + + scadaProvider := h.linkConfig.SCADAProvider + if scadaProvider == nil { + return fmt.Errorf("the reference to Scada provider in hcp link config is nil") + } + + // Start both the Link functionality and the provider + if err := h.link.Start(); err != nil { + return fmt.Errorf("failed to start Link functionality, %w", err) + } + + if err := scadaProvider.Start(); err != nil { + return fmt.Errorf("failed to start SCADA provider, %w", err) + } + + // The connection should have been established between Vault and HCP + // Update core with the status + h.LinkStatus.SetHCPLinkStatus(h.GetConnectionStatusMessage(h.GetScadaSessionStatus()), h.getResourceID()) + + // Running capabilities + err := h.RunCapabilities() + if err != nil { + h.logger.Error("failed to start HCP link capabilities", "error", err.Error()) + } + + go h.reportStatus() + + h.running = true + + h.logger.Info("started HCP Link") + + return nil +} + +// runs in a goroutine and in every 5 seconds, it sets the link status in Core +// such that a user could query the health of the connection via seal-status +// API. In addition, it checks replication status of Vault and sets that in +// Scada provider metadata status +func (h *HCPLinkVault) reportStatus() { + ticker := time.NewTicker(SetLinkStatusCadence) + defer ticker.Stop() + for { + // Check for a shutdown + select { + case <-h.stopCh: + h.logger.Trace("returning from reporting link/node status") + return + case <-ticker.C: + // setting the HCP link status in core in this cadence + h.LinkStatus.SetHCPLinkStatus( + h.GetConnectionStatusMessage(h.GetScadaSessionStatus()), + h.getResourceID(), + ) + + // if node is in standby mode, set Scada metadata to indicate that + var nodeStatus string + standby, perfStandby := h.LinkStatus.StandbyStates() + switch { + case perfStandby: + nodeStatus = perfStandbyStatus + case standby: + nodeStatus = standbyStatus + default: + nodeStatus = activeStatus + } + + h.linkConfig.SCADAProvider.UpdateMeta(map[string]string{metaDataNodeStatus: nodeStatus}) + } + } +} + +func buildConnectionErrorMessage(scadaStatus, errMsg string, errTime time.Time) string { + return fmt.Sprintf("%s since %s; error: %v", scadaStatus, errTime.Format(time.RFC3339Nano), errMsg) +} + +// GetConnectionStatusMessage returns a meaningful message about connection +// status. If Scada connection is anything other than "connected", it will +// get the LastError from ScadaProvider, and builds a message with the +// scada session status, error time and error message, and returns the message. +func (h *HCPLinkVault) GetConnectionStatusMessage(scadaStatus string) string { + if scadaStatus == scada.SessionStatusConnected { + return scadaStatus + } + + // HCP connectivity team is going to unify "connecting" with "waiting" + // statuses later. 
For simplicity, we unify the two until Scada + // provider unifies them + if scadaStatus == scada.SessionStatusWaiting { + scadaStatus = scada.SessionStatusConnecting + } + + // There are two other states "connecting" and "disconnected" + // For those, there could have been an error with the connection + var errToReturn string + errTime, err := h.linkConfig.SCADAProvider.LastError() + if err == nil { + err = genericScadaConnectionError + errTime = time.Now() + } + + switch { + case strings.Contains(err.Error(), scada.ErrPermissionDenied.Error()): + errToReturn = scada.ErrPermissionDenied.Error() + case strings.Contains(err.Error(), invalidClientCredentials.Error()), strings.Contains(err.Error(), scada.ErrInvalidCredentials.Error()): + errToReturn = scada.ErrInvalidCredentials.Error() + default: + errToReturn = genericScadaConnectionError.Error() + } + + return buildConnectionErrorMessage(scadaStatus, errToReturn, errTime) +} + +func (h *HCPLinkVault) getResourceID() string { + if h.scadaConfig != nil { + return h.scadaConfig.Resource.ID + } + + return "" +} + +func (h *HCPLinkVault) GetScadaSessionStatus() string { + if h.linkConfig != nil && h.linkConfig.SCADAProvider != nil { + return h.linkConfig.SCADAProvider.SessionStatus() + } + return scada.SessionStatusDisconnected +} + +func (h *HCPLinkVault) Shutdown() error { + h.l.Lock() + defer h.l.Unlock() + + if !h.running { + return nil + } + + if h.stopCh != nil { + close(h.stopCh) + h.stopCh = nil + } + + h.logger.Info("tearing down HCP Link") + + var retErr *multierror.Error + + // stopping capabilities + for capName, capability := range h.capabilities { + err := capability.Stop() + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("failed to close capability %s, %w", capName, err)) + } + } + + // updating metaDataNodeStatus before stopping link + h.linkConfig.SCADAProvider.UpdateMeta(map[string]string{metaDataNodeStatus: ""}) + + // stopping hcp link + err := h.link.Stop() + if err != nil { + retErr = multierror.Append(err, fmt.Errorf("failed to stop link %w", err)) + } + h.link = nil + + // stopping scada provider + err = h.linkConfig.SCADAProvider.Stop() + if err != nil { + retErr = multierror.Append(err, fmt.Errorf("failed to stop scada provider %w", err)) + } + + // setting the link status in Vault + h.LinkStatus.SetHCPLinkStatus(h.GetConnectionStatusMessage(h.GetScadaSessionStatus()), h.getResourceID()) + + h.running = false + + return retErr.ErrorOrNil() +} + +func (h *HCPLinkVault) RunCapabilities() error { + var retErr *multierror.Error + for capName, capability := range h.capabilities { + err := capability.Start() + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("failed to start capability %s, %v", capName, err)) + } + } + + return retErr.ErrorOrNil() +} diff --git a/vault/hcp_link/link_oss.go b/vault/hcp_link/link_oss.go deleted file mode 100644 index badc7d776..000000000 --- a/vault/hcp_link/link_oss.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !enterprise - -package hcp_link - -import ( - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/vault" -) - -func NewHCPLink(linkConf *configutil.HCPLinkConfig, core *vault.Core, logger hclog.Logger) (*WrappedHCPLinkVault, error) { - return nil, nil -} - -func (h *WrappedHCPLinkVault) Shutdown() error { - return nil -} - -func (h *WrappedHCPLinkVault) GetScadaSessionStatus() string { return Disconnected } - -func (h *WrappedHCPLinkVault) GetConnectionStatusMessage(scadaStatus string) 
string { - return scadaStatus -} diff --git a/vault/hcp_link/proto/node_status/status.pb.go b/vault/hcp_link/proto/node_status/status.pb.go index 216158efb..783d9f6ba 100644 --- a/vault/hcp_link/proto/node_status/status.pb.go +++ b/vault/hcp_link/proto/node_status/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: vault/hcp_link/proto/node_status/status.proto package node_status diff --git a/vault/hcp_link/structs.go b/vault/hcp_link/structs.go deleted file mode 100644 index 9ca057168..000000000 --- a/vault/hcp_link/structs.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcp_link - -// SessionStatus is used to express the current status of the SCADA session. -type SessionStatus = string - -const ( - // Connected HCP link connection status when it is connected - Connected = SessionStatus("connected") - // Disconnected HCP link connection status when it is disconnected - Disconnected = SessionStatus("disconnected") -) - -type WrappedHCPLinkVault struct { - HCPLinkVaultInterface -} - -type HCPLinkVaultInterface interface { - Shutdown() error - GetScadaSessionStatus() string - GetConnectionStatusMessage(string) string -} diff --git a/vault/request_forwarding_service.pb.go b/vault/request_forwarding_service.pb.go index aed50fab9..d4cf83709 100644 --- a/vault/request_forwarding_service.pb.go +++ b/vault/request_forwarding_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: vault/request_forwarding_service.proto package vault diff --git a/vault/tokens/token.pb.go b/vault/tokens/token.pb.go index e6ab0dd39..7fad068bd 100644 --- a/vault/tokens/token.pb.go +++ b/vault/tokens/token.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc v3.21.9 // source: vault/tokens/token.proto package tokens