Add dns resolver to PKI Binary Cluster (#20485)
* Export DockerAPI for use by other consumers

  As usage of DockerCluster gets more advanced, some users may want to interact with the container nodes of the cluster. If you already have a DockerAPI instance lying around you can reuse it safely, but for cases where an existing runner instance (e.g., docker/testhelpers's) is not available, reusing the existing cluster's DockerAPI is easiest.

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Add ability to exec commands without runner

  When modifying DockerTestCluster's containers manually, we might not have a Runner instance; instead, expose the ability to run commands via a DockerAPI instance directly, as they're awfully convenient.

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Add DNS resolver into ACME tests

  This updates the pkiext_binary tests to use an adjacent DNS resolver, allowing these tests to eventually be extended to solve DNS challenges, as modifying the /etc/hosts file does not allow this.

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Fix loading DNS resolver onto network

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Fix bug with DNS configuration validation

  Both conditionals here were inverted: an empty address means a bad specification was given, and a nil parse result means the value was not a valid IP address.

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Fix specifying TXT records, allow removing records

  Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

---------

Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
Parent: b4bec9bd30
Commit: d8c5456f8a

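Taken together, these changes let a pkiext_binary sub-test provision a cluster with an adjacent DNS resolver and publish records through it instead of editing /etc/hosts. A minimal sketch of that flow, assuming a hypothetical test name and placeholder IP, built only from helpers introduced in this commit (NewVaultPkiClusterWithDNS, AddHostname, CreateAcmeMount):

func Test_ACMEWithResolverSketch(t *testing.T) {
	// Vault cluster plus a DNS resolver on the same Docker network,
	// authoritative for dadgarcorp.com.
	cluster := NewVaultPkiClusterWithDNS(t)
	defer cluster.Cleanup()

	// Served as an A record by the resolver; when no resolver was
	// provisioned this helper falls back to appending to /etc/hosts
	// inside each Vault container.
	if err := cluster.AddHostname("acme-client.dadgarcorp.com", "10.0.0.5"); err != nil {
		t.Fatal(err)
	}

	// CreateAcmeMount points the mount's dns_resolver at the
	// resolver's container address when one was provisioned.
	pki, err := cluster.CreateAcmeMount("pki")
	if err != nil {
		t.Fatal(err)
	}
	_ = pki // issue ACME orders against this mount...
}
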
@@ -18,6 +18,7 @@ type TestServer struct {
 	ctx context.Context
 
 	runner  *docker.Runner
+	network string
 	startup *docker.Service
 
 	serial int

@@ -38,9 +39,10 @@ func SetupResolverOnNetwork(t *testing.T, domain string, network string) *TestSe
 	ts.ctx = context.Background()
 	ts.domains = []string{domain}
 	ts.records = map[string]map[string][]string{}
+	ts.network = network
 
 	ts.setupRunner(domain, network)
-	ts.startContainer()
+	ts.startContainer(network)
 	ts.PushConfig()
 
 	return &ts

@@ -61,7 +63,7 @@ func (ts *TestServer) setupRunner(domain string, network string) {
 	require.NoError(ts.t, err)
 }
 
-func (ts *TestServer) startContainer() {
+func (ts *TestServer) startContainer(network string) {
 	connUpFunc := func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
 		// Perform a simple connection to this resolver, even though the
 		// default configuration doesn't do anything useful.

@@ -85,9 +87,26 @@ func (ts *TestServer) startContainer() {
 		return docker.NewServiceHostPort(host, port), nil
 	}
 
-	result, err := ts.runner.StartService(ts.ctx, connUpFunc)
+	result, _, err := ts.runner.StartNewService(ts.ctx, true, true, connUpFunc)
 	require.NoError(ts.t, err, "failed to start dns resolver for "+ts.domains[0])
 	ts.startup = result
 
+	if ts.startup.StartResult.RealIP == "" {
+		mapping, err := ts.runner.GetNetworkAndAddresses(ts.startup.Container.ID)
+		require.NoError(ts.t, err, "failed to fetch network addresses to correct missing real IP address")
+		if len(network) == 0 {
+			require.Equal(ts.t, 1, len(mapping), "expected exactly one network address")
+			for network = range mapping {
+				// Because mapping is a map of network name->ip, we need
+				// to use the above range's assignment to get the name,
+				// as there is no other way of getting the keys of a map.
+			}
+		}
+		require.Contains(ts.t, mapping, network, "expected network to be part of the mapping")
+		ts.startup.StartResult.RealIP = mapping[network]
+	}
+
 	ts.t.Logf("[dnsserv] Addresses of DNS resolver: local=%v / container=%v", ts.GetLocalAddr(), ts.GetRemoteAddr())
 }
 
 func (ts *TestServer) buildNamedConf() string {

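The resolver above is driven entirely through the dnstest.TestServer helpers; a minimal sketch of standalone use, with a placeholder network name and record values, using only functions that appear in this diff (SetupResolverOnNetwork, AddRecord, PushConfig, GetRemoteAddr, Cleanup):

func TestResolverSketch(t *testing.T) {
	// Network name, hostnames, and record values are placeholders.
	resolver := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", "some-docker-network")
	defer resolver.Cleanup()

	// Records are staged with AddRecord and take effect once
	// PushConfig pushes the regenerated resolver configuration.
	resolver.AddRecord("www.dadgarcorp.com", "A", "10.0.0.6")
	resolver.AddRecord("_acme-challenge.www.dadgarcorp.com", "TXT", "challenge-token")
	resolver.PushConfig()

	// GetRemoteAddr is the address handed to Vault's ACME dns_resolver.
	t.Logf("resolver reachable at %s inside the network", resolver.GetRemoteAddr())
}
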
@@ -188,10 +188,10 @@ func (b *backend) pathAcmeWrite(ctx context.Context, req *logical.Request, d *fr
 		if err != nil {
 			return nil, fmt.Errorf("failed to parse DNS resolver address: %w", err)
 		}
-		if addr != "" {
+		if addr == "" {
 			return nil, fmt.Errorf("failed to parse DNS resolver address: got empty address")
 		}
-		if net.ParseIP(addr) != nil {
+		if net.ParseIP(addr) == nil {
 			return nil, fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname")
 		}
 	}

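The two flipped comparisons are the whole fix: an empty host or a non-IP host must now be rejected instead of accepted. A standalone sketch of the intended check, assuming the resolver value is a host:port string split with net.SplitHostPort (the exact parsing is outside this hunk):

// Illustrative only; assumes "fmt" and "net" imports.
func validateDNSResolver(resolver string) error {
	addr, _, err := net.SplitHostPort(resolver)
	if err != nil {
		return fmt.Errorf("failed to parse DNS resolver address: %w", err)
	}
	if addr == "" {
		// An empty host means the specification was malformed.
		return fmt.Errorf("failed to parse DNS resolver address: got empty address")
	}
	if net.ParseIP(addr) == nil {
		// ParseIP returns nil for anything that is not a literal
		// IPv4/IPv6 address, e.g. a hostname.
		return fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname")
	}
	return nil
}
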
@@ -30,7 +30,7 @@ import (
 // a bunch of sub-tests against that cluster. It is up to each sub-test to run/configure
 // a new pki mount within the cluster to not interfere with each other.
 func Test_ACME(t *testing.T) {
-	cluster := NewVaultPkiCluster(t)
+	cluster := NewVaultPkiClusterWithDNS(t)
 	defer cluster.Cleanup()
 
 	tc := map[string]func(t *testing.T, cluster *VaultPkiCluster){

@@ -91,7 +91,7 @@ func SubtestACMECertbot(t *testing.T, cluster *VaultPkiCluster) {
 	ipAddr := networks[vaultNetwork]
 	hostname := "acme-client.dadgarcorp.com"
 
-	err = pki.AddNameToHostsFile(ipAddr, hostname, logConsumer, logStdout, logStderr)
+	err = pki.AddHostname(hostname, ipAddr)
 	require.NoError(t, err, "failed to update vault host files")
 
 	certbotCmd := []string{

@@ -197,7 +197,7 @@ func SubTestACMEIPAndDNS(t *testing.T, cluster *VaultPkiCluster) {
 	ipAddr := networks[pki.GetContainerNetworkName()]
 	hostname := "go-lang-acme-client.dadgarcorp.com"
 
-	err = pki.AddNameToHostsFile(ipAddr, hostname, logConsumer, logStdout, logStderr)
+	err = pki.AddHostname(hostname, ipAddr)
 	require.NoError(t, err, "failed to update vault host files")
 
 	// Perform an ACME lifecycle with an order that contains both an IP and a DNS name identifier

@@ -6,20 +6,19 @@ package pkiext_binary
 import (
 	"context"
 	"fmt"
-	"io"
 	"os"
 	"testing"
 
-	dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
-
-	"github.com/hashicorp/vault/sdk/helper/testcluster"
-
 	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/pki/dnstest"
+	dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/helper/testcluster"
 	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
 )
 
 type VaultPkiCluster struct {
 	cluster *docker.DockerCluster
+	Dns     *dnstest.TestServer
 }
 
 func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {

@@ -47,8 +46,18 @@ func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {
 	return &VaultPkiCluster{cluster: cluster}
 }
 
+func NewVaultPkiClusterWithDNS(t *testing.T) *VaultPkiCluster {
+	cluster := NewVaultPkiCluster(t)
+	dns := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", cluster.GetContainerNetworkName())
+	cluster.Dns = dns
+	return cluster
+}
+
 func (vpc *VaultPkiCluster) Cleanup() {
 	vpc.cluster.Cleanup()
+	if vpc.Dns != nil {
+		vpc.Dns.Cleanup()
+	}
 }
 
 func (vpc *VaultPkiCluster) GetActiveContainerHostPort() string {

@@ -71,27 +80,24 @@ func (vpc *VaultPkiCluster) GetActiveNode() *api.Client {
 	return vpc.cluster.Nodes()[0].APIClient()
 }
 
-func (vpc *VaultPkiCluster) AddNameToHostsFile(ip, hostname string, logConsumer func(string), logStdout, logStderr io.Writer) error {
+func (vpc *VaultPkiCluster) AddHostname(hostname, ip string) error {
+	if vpc.Dns != nil {
+		vpc.Dns.AddRecord(hostname, "A", ip)
+		vpc.Dns.PushConfig()
+		return nil
+	} else {
+		return vpc.AddNameToHostFiles(hostname, ip)
+	}
+}
+
+func (vpc *VaultPkiCluster) AddNameToHostFiles(hostname, ip string) error {
 	updateHostsCmd := []string{
 		"sh", "-c",
 		"echo '" + ip + " " + hostname + "' >> /etc/hosts",
 	}
 	for _, node := range vpc.cluster.ClusterNodes {
 		containerID := node.Container.ID
-		runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
-			ImageRepo:     node.ImageRepo,
-			ImageTag:      node.ImageTag,
-			ContainerName: containerID,
-			NetworkName:   node.ContainerNetworkName,
-			LogConsumer:   logConsumer,
-			LogStdout:     logStdout,
-			LogStderr:     logStderr,
-		})
-		if err != nil {
-			return err
-		}
-
-		_, _, retcode, err := runner.RunCmdWithOutput(context.Background(), containerID, updateHostsCmd)
+		_, _, retcode, err := dockhelper.RunCmdWithOutput(vpc.cluster.DockerAPI, context.Background(), containerID, updateHostsCmd)
 		if err != nil {
 			return fmt.Errorf("failed updating container %s host file: %w", containerID, err)
 		}

@@ -104,6 +110,25 @@ func (vpc *VaultPkiCluster) AddNameToHostsFile(ip, hostname string, logConsumer
 	return nil
 }
 
+func (vpc *VaultPkiCluster) AddDNSRecord(hostname, recordType, ip string) error {
+	if vpc.Dns == nil {
+		return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to provision custom records")
+	}
+
+	vpc.Dns.AddRecord(hostname, recordType, ip)
+	vpc.Dns.PushConfig()
+	return nil
+}
+
+func (vpc *VaultPkiCluster) RemoveAllDNSRecords() error {
+	if vpc.Dns == nil {
+		return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records")
+	}
+
+	vpc.Dns.RemoveAllRecords()
+	return nil
+}
+
 func (vpc *VaultPkiCluster) CreateMount(name string) (*VaultPkiMount, error) {
 	err := vpc.GetActiveNode().Sys().Mount(name, &api.MountInput{
 		Type: "pki",

@@ -137,7 +162,12 @@ func (vpc *VaultPkiCluster) CreateAcmeMount(mountName string) (*VaultPkiMount, e
 		return nil, fmt.Errorf("failed updating cluster config: %w", err)
 	}
 
-	err = pki.UpdateAcmeConfig(true, nil)
+	cfg := map[string]interface{}{}
+	if vpc.Dns != nil {
+		cfg["dns_resolver"] = vpc.Dns.GetRemoteAddr()
+	}
+
+	err = pki.UpdateAcmeConfig(true, cfg)
 	if err != nil {
 		return nil, fmt.Errorf("failed updating acme config: %w", err)
 	}

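AddDNSRecord and RemoveAllDNSRecords exist so sub-tests can publish arbitrary record types (for example the TXT record a dns-01 challenge requires) and then reset the resolver between cases. A sketch with placeholder names and values, assuming cluster and t are in scope as in the sketch near the top:

// The TXT value here stands in for a real key-authorization digest.
if err := cluster.AddDNSRecord("_acme-challenge.client.dadgarcorp.com", "TXT", "placeholder-digest"); err != nil {
	t.Fatal(err)
}

// ... let the ACME client satisfy its dns-01 challenge ...

// Drop every record so the next sub-test starts from a clean resolver.
if err := cluster.RemoveAllDNSRecords(); err != nil {
	t.Fatal(err)
}
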
@@ -561,6 +561,10 @@ func (u RunCmdUser) Apply(cfg *types.ExecConfig) error {
 }
 
 func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) {
+	return RunCmdWithOutput(d.DockerAPI, ctx, container, cmd, opts...)
+}
+
+func RunCmdWithOutput(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) {
 	runCfg := types.ExecConfig{
 		AttachStdout: true,
 		AttachStderr: true,

@@ -573,12 +577,12 @@ func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []s
 		}
 	}
 
-	ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg)
+	ret, err := api.ContainerExecCreate(ctx, container, runCfg)
 	if err != nil {
 		return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg)
 	}
 
-	resp, err := d.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
+	resp, err := api.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
 	if err != nil {
 		return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret)
 	}

@@ -594,7 +598,7 @@ func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []s
 	stderr := stderrB.Bytes()
 
 	// Fetch return code.
-	info, err := d.DockerAPI.ContainerExecInspect(ctx, ret.ID)
+	info, err := api.ContainerExecInspect(ctx, ret.ID)
 	if err != nil {
 		return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err)
 	}

@@ -603,6 +607,10 @@ func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []s
 }
 
 func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
+	return RunCmdInBackground(d.DockerAPI, ctx, container, cmd, opts...)
+}
+
+func RunCmdInBackground(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
 	runCfg := types.ExecConfig{
 		AttachStdout: true,
 		AttachStderr: true,

@@ -615,12 +623,12 @@ func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd [
 		}
 	}
 
-	ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg)
+	ret, err := api.ContainerExecCreate(ctx, container, runCfg)
 	if err != nil {
 		return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg)
 	}
 
-	err = d.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
+	err = api.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
 	if err != nil {
 		return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret)
 	}

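Promoting the exec helpers to package-level functions means a caller holding only a *client.Client, such as the DockerAPI the test cluster now exports, can run a command without building a Runner. A sketch against the signatures above, with a placeholder container ID and command:

// cluster.DockerAPI and containerID stand in for whatever Docker client
// and container the caller already has on hand.
stdout, stderr, retcode, err := dockhelper.RunCmdWithOutput(
	cluster.DockerAPI, context.Background(), containerID,
	[]string{"sh", "-c", "cat /etc/hosts"},
)
if err != nil {
	t.Fatalf("exec failed: %v", err)
}
if retcode != 0 {
	t.Fatalf("command exited %d: %s", retcode, string(stderr))
}
t.Logf("container hosts file:\n%s", string(stdout))
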
@@ -67,7 +67,7 @@ type DockerCluster struct {
 	// rootToken is the initial root token created when the Vault cluster is
 	// created.
 	rootToken string
-	dockerAPI *docker.Client
+	DockerAPI *docker.Client
 	ID        string
 	Logger    log.Logger
 	builtTags map[string]struct{}

@@ -434,7 +434,7 @@ func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerC
 	}
 
 	dc := &DockerCluster{
-		dockerAPI:   api,
+		DockerAPI:   api,
 		RaftStorage: true,
 		ClusterName: opts.ClusterName,
 		Logger:      opts.Logger,

@@ -466,7 +466,7 @@ type DockerClusterNode struct {
 	WorkDir   string
 	Cluster   *DockerCluster
 	Container *types.ContainerJSON
-	dockerAPI *docker.Client
+	DockerAPI *docker.Client
 	runner    *dockhelper.Runner
 	Logger    log.Logger
 	cleanupContainer func()

@@ -563,13 +563,13 @@ func (n *DockerClusterNode) cleanup() error {
 
 func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
 	if n.DataVolumeName == "" {
-		vol, err := n.dockerAPI.VolumeCreate(ctx, volume.CreateOptions{})
+		vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{})
 		if err != nil {
 			return err
 		}
 		n.DataVolumeName = vol.Name
 		n.cleanupVolume = func() {
-			_ = n.dockerAPI.VolumeRemove(ctx, vol.Name, false)
+			_ = n.DockerAPI.VolumeRemove(ctx, vol.Name, false)
 		}
 	}
 	vaultCfg := map[string]interface{}{}

@@ -899,7 +899,7 @@ func (dc *DockerCluster) addNode(ctx context.Context, opts *DockerClusterOptions
 	i := len(dc.ClusterNodes)
 	nodeID := fmt.Sprintf("core-%d", i)
 	node := &DockerClusterNode{
-		dockerAPI: dc.dockerAPI,
+		DockerAPI: dc.DockerAPI,
 		NodeID:    nodeID,
 		Cluster:   dc,
 		WorkDir:   filepath.Join(dc.tmpDir, nodeID),

@@ -987,7 +987,7 @@ FROM %s:%s
 COPY vault /bin/vault
 `, opts.ImageRepo, sourceTag)
 
-	_, err = dockhelper.BuildImage(ctx, dc.dockerAPI, containerFile, bCtx,
+	_, err = dockhelper.BuildImage(ctx, dc.DockerAPI, containerFile, bCtx,
 		dockhelper.BuildRemove(true), dockhelper.BuildForceRemove(true),
 		dockhelper.BuildPullParent(true),
 		dockhelper.BuildTags([]string{opts.ImageRepo + ":" + tag}))