Better docker support: image building + better command execution (#17231)
* Refactor Docker command execution

  This refactor makes it easier to interact with containers by providing two
  interfaces, RunCmdWithOutput and RunCmdInBackground, for executing commands
  in running containers without driving the Docker exec API manually.

* Allow building Containerfiles in tests

  By adding image-building capabilities to testhelpers (coupled with the
  better command execution support), we can build better, more reliable
  integration tests on top of public base images without maintaining separate
  out-of-tree forks of those images to work around their shortcomings. In
  particular, rather than the rather messy echo hack for writing clients.conf,
  it is far better to provision it via a slim Containerfile overlay on top of
  the stock jumanjiman/radiusd:latest image.

* Correctly parse stdout/stderr in RunCmdWithOutput

* ctx -> bCtx for BuildContext

* Update errors to use %w instead of %v

Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
parent d48e739b1d
commit 93a11a8678
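To make the intent of the diff below easier to follow, here is a rough usage sketch (not part of the commit) of how a test might combine the new BuildImage/BuildContext helpers with RunCmdWithOutput. The helper APIs are the ones added in this commit; the function name, the placeholder clients.conf contents, the "ls /etc/raddb" command, and the omission of cleanup are illustrative assumptions only.

// Sketch only: combine image building and command execution from the new
// docker testhelpers. Not part of this commit.
package example // hypothetical test package

import (
	"context"
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/docker"
)

func buildAndInspectRadiusd(t *testing.T) {
	ctx := context.Background()

	// Overlay a clients.conf onto the stock image via a slim Containerfile,
	// instead of echo-ing the file into a running container.
	containerfile := `
FROM jumanjiman/radiusd:latest
COPY clients.conf /etc/raddb/clients.conf
`
	bCtx := docker.NewBuildContext()
	bCtx["clients.conf"] = docker.PathContentsFromBytes([]byte("client 0.0.0.0/1 { ... }")) // placeholder contents

	runner, err := docker.NewServiceRunner(docker.RunOptions{
		ImageRepo:     "vault_radiusd_any_client",
		ImageTag:      "latest",
		ContainerName: "radiusd",
		Cmd:           []string{"-f", "-l", "stdout", "-X"},
		Ports:         []string{"1812/udp"},
	})
	if err != nil {
		t.Fatalf("could not create service runner: %v", err)
	}

	// Build the derived image locally; output is the raw build log.
	output, err := runner.BuildImage(ctx, containerfile, bCtx,
		docker.BuildRemove(true),
		docker.BuildTags([]string{"vault_radiusd_any_client:latest"}))
	if err != nil {
		t.Fatalf("could not build image: %v\n%s", err, output)
	}

	// Start the service from the freshly built image.
	svc, err := runner.StartService(ctx, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
		return docker.NewServiceHostPort(host, port), nil
	})
	if err != nil {
		t.Fatalf("could not start container: %v", err)
	}

	// Run a one-off command in the running container and capture its output.
	stdout, stderr, exitCode, err := runner.RunCmdWithOutput(ctx, svc.Container.ID,
		[]string{"ls", "/etc/raddb"}, docker.RunCmdUser("0"))
	if err != nil || exitCode != 0 {
		t.Fatalf("exec failed (exit %d): %v\nstderr: %s", exitCode, err, stderr)
	}
	t.Logf("stdout: %s", stdout)
}

The BuildRemove/BuildTags/RunCmdUser values are small functional options that simply set fields on the underlying Docker API option structs, so tests opt into only the knobs they need.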
@@ -3,7 +3,6 @@ package radius
 import (
 	"context"
 	"fmt"
-	"io"
 	"os"
 	"reflect"
 	"strconv"
@@ -14,8 +13,6 @@ import (
 	"github.com/hashicorp/vault/helper/testhelpers/docker"
 	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
 	"github.com/hashicorp/vault/sdk/logical"
-
-	"github.com/docker/docker/api/types"
 )
 
 const (
@@ -35,35 +32,6 @@ func prepareRadiusTestContainer(t *testing.T) (func(), string, int) {
 		return func() {}, os.Getenv(envRadiusRadiusHost), port
 	}
 
-	radiusdOptions := []string{"radiusd", "-f", "-l", "stdout", "-X"}
-	runner, err := docker.NewServiceRunner(docker.RunOptions{
-		ImageRepo:     "jumanjiman/radiusd",
-		ImageTag:      "latest",
-		ContainerName: "radiusd",
-		// Switch the entry point for this operation; we want to sleep
-		// instead of exec'ing radiusd, as we first need to write a new
-		// client configuration. radiusd's SIGHUP handler does not reload
-		// this config file, hence we choose to manually start radiusd
-		// below.
-		Entrypoint: []string{"sleep", "3600"},
-		Ports:      []string{"1812/udp"},
-		LogConsumer: func(s string) {
-			if t.Failed() {
-				t.Logf("container logs: %s", s)
-			}
-		},
-	})
-	if err != nil {
-		t.Fatalf("Could not start docker radiusd: %s", err)
-	}
-
-	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
-		return docker.NewServiceHostPort(host, port), nil
-	})
-	if err != nil {
-		t.Fatalf("Could not start docker radiusd: %s", err)
-	}
-
 	// Now allow any client to connect to this radiusd instance by writing our
 	// own clients.conf file.
 	//
@@ -74,7 +42,8 @@ func prepareRadiusTestContainer(t *testing.T) (func(), string, int) {
 	//
 	// See also: https://freeradius.org/radiusd/man/clients.conf.html
 	ctx := context.Background()
-	clientsConfig := `client 0.0.0.0/1 {
+	clientsConfig := `
+client 0.0.0.0/1 {
 	ipaddr = 0.0.0.0/1
 	secret = testing123
 	shortname = all-clients-first
@@ -84,44 +53,54 @@ client 128.0.0.0/1 {
 	ipaddr = 128.0.0.0/1
 	secret = testing123
 	shortname = all-clients-second
-}`
-	ret, err := runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{
-		User:         "0",
-		AttachStderr: true,
-		AttachStdout: true,
-		// Hack: write this via echo, since it exists in the container.
-		Cmd: []string{"sh", "-c", "echo '" + clientsConfig + "' > /etc/raddb/clients.conf"},
+}
+`
+
+	containerfile := `
+FROM jumanjiman/radiusd:latest
+
+COPY clients.conf /etc/raddb/clients.conf
+`
+
+	bCtx := docker.NewBuildContext()
+	bCtx["clients.conf"] = docker.PathContentsFromBytes([]byte(clientsConfig))
+
+	imageName := "vault_radiusd_any_client"
+	imageTag := "latest"
+
+	runner, err := docker.NewServiceRunner(docker.RunOptions{
+		ImageRepo:     imageName,
+		ImageTag:      imageTag,
+		ContainerName: "radiusd",
+		Cmd:           []string{"-f", "-l", "stdout", "-X"},
+		Ports:         []string{"1812/udp"},
+		LogConsumer: func(s string) {
+			if t.Failed() {
+				t.Logf("container logs: %s", s)
+			}
+		},
 	})
 	if err != nil {
-		t.Fatalf("Failed to update radiusd client config: error creating command: %v", err)
+		t.Fatalf("Could not provision docker service runner: %s", err)
 	}
-	resp, err := runner.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
-	if err != nil {
-		t.Fatalf("Failed to update radiusd client config: error attaching command: %v", err)
-	}
-	read, err := io.ReadAll(resp.Reader)
-	t.Logf("Command Output (%v):\n%v", err, string(read))
-
-	ret, err = runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{
-		User:         "0",
-		AttachStderr: true,
-		AttachStdout: true,
-		// As noted above, we need to start radiusd manually now.
-		Cmd: radiusdOptions,
+	output, err := runner.BuildImage(ctx, containerfile, bCtx,
+		docker.BuildRemove(true), docker.BuildForceRemove(true),
+		docker.BuildPullParent(true),
+		docker.BuildTags([]string{imageName + ":" + imageTag}))
+	if err != nil {
+		t.Fatalf("Could not build new image: %v", err)
+	}
+
+	t.Logf("Image build output: %v", string(output))
+
+	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+		time.Sleep(2 * time.Second)
+		return docker.NewServiceHostPort(host, port), nil
 	})
 	if err != nil {
-		t.Fatalf("Failed to start radiusd service: error creating command: %v", err)
+		t.Fatalf("Could not start docker radiusd: %s", err)
 	}
-	err = runner.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
-	if err != nil {
-		t.Fatalf("Failed to start radiusd service: error starting command: %v", err)
-	}
-
-	// Give radiusd time to start...
-	//
-	// There's no straightfoward way to check the state, but the server starts
-	// up quick so a 2 second sleep should be enough.
-	time.Sleep(2 * time.Second)
-
 	pieces := strings.Split(svc.Config.Address(), ":")
 	port, _ := strconv.Atoi(pieces[1])

@@ -1,11 +1,13 @@
 package docker
 
 import (
+	"archive/tar"
 	"bytes"
 	"context"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"net/url"
 	"os"
@@ -20,6 +22,7 @@ import (
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/go-connections/nat"
 	"github.com/hashicorp/go-uuid"
 )
@@ -370,3 +373,239 @@ func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from
 
 	return nil
 }
+
+type RunCmdOpt interface {
+	Apply(cfg *types.ExecConfig) error
+}
+
+type RunCmdUser string
+
+var _ RunCmdOpt = (*RunCmdUser)(nil)
+
+func (u RunCmdUser) Apply(cfg *types.ExecConfig) error {
+	cfg.User = string(u)
+	return nil
+}
+
+func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) {
+	runCfg := types.ExecConfig{
+		AttachStdout: true,
+		AttachStderr: true,
+		Cmd:          cmd,
+	}
+
+	for index, opt := range opts {
+		if err := opt.Apply(&runCfg); err != nil {
+			return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg)
+	if err != nil {
+		return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg)
+	}
+
+	resp, err := d.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret)
+	}
+	defer resp.Close()
+
+	var stdoutB bytes.Buffer
+	var stderrB bytes.Buffer
+	if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil {
+		return nil, nil, -1, fmt.Errorf("error reading command output: %v", err)
+	}
+
+	stdout := stdoutB.Bytes()
+	stderr := stderrB.Bytes()
+
+	// Fetch return code.
+	info, err := d.DockerAPI.ContainerExecInspect(ctx, ret.ID)
+	if err != nil {
+		return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err)
+	}
+
+	return stdout, stderr, info.ExitCode, nil
+}
+
+func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
+	runCfg := types.ExecConfig{
+		AttachStdout: true,
+		AttachStderr: true,
+		Cmd:          cmd,
+	}
+
+	for index, opt := range opts {
+		if err := opt.Apply(&runCfg); err != nil {
+			return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg)
+	if err != nil {
+		return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg)
+	}
+
+	err = d.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret)
+	}
+
+	return ret.ID, nil
+}
+
+// Mapping of path->contents
+type PathContents interface {
+	UpdateHeader(header *tar.Header) error
+	Get() ([]byte, error)
+}
+
+type FileContents struct {
+	Data []byte
+	Mode int64
+}
+
+func (b FileContents) UpdateHeader(header *tar.Header) error {
+	header.Mode = b.Mode
+	return nil
+}
+
+func (b FileContents) Get() ([]byte, error) {
+	return b.Data, nil
+}
+
+func PathContentsFromBytes(data []byte) PathContents {
+	return FileContents{
+		Data: data,
+		Mode: 0o644,
+	}
+}
+
+type BuildContext map[string]PathContents
+
+func NewBuildContext() BuildContext {
+	return BuildContext{}
+}
+
+func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
+	var err error
+	buffer := new(bytes.Buffer)
+	tarBuilder := tar.NewWriter(buffer)
+	defer tarBuilder.Close()
+
+	for filepath, contents := range *bCtx {
+		fileHeader := &tar.Header{Name: filepath}
+		if contents == nil && !strings.HasSuffix(filepath, "/") {
+			return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath)
+		}
+
+		if err := contents.UpdateHeader(fileHeader); err != nil {
+			return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err)
+		}
+
+		var rawContents []byte
+		if contents != nil {
+			rawContents, err = contents.Get()
+			if err != nil {
+				return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err)
+			}
+
+			fileHeader.Size = int64(len(rawContents))
+		}
+
+		if err := tarBuilder.WriteHeader(fileHeader); err != nil {
+			return nil, fmt.Errorf("failed to write tar header entry for %v: %w", filepath, err)
+		}
+
+		if contents != nil {
+			if _, err := tarBuilder.Write(rawContents); err != nil {
+				return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err)
+			}
+		}
+	}
+
+	return bytes.NewReader(buffer.Bytes()), nil
+}
+
+type BuildOpt interface {
+	Apply(cfg *types.ImageBuildOptions) error
+}
+
+type BuildRemove bool
+
+var _ BuildOpt = (*BuildRemove)(nil)
+
+func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.Remove = bool(u)
+	return nil
+}
+
+type BuildForceRemove bool
+
+var _ BuildOpt = (*BuildForceRemove)(nil)
+
+func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.ForceRemove = bool(u)
+	return nil
+}
+
+type BuildPullParent bool
+
+var _ BuildOpt = (*BuildPullParent)(nil)
+
+func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.PullParent = bool(u)
+	return nil
+}
+
+type BuildArgs map[string]*string
+
+var _ BuildOpt = (*BuildArgs)(nil)
+
+func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.BuildArgs = u
+	return nil
+}
+
+type BuildTags []string
+
+var _ BuildOpt = (*BuildTags)(nil)
+
+func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.Tags = u
+	return nil
+}
+
+const containerfilePath = "_containerfile"
+
+func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) {
+	var cfg types.ImageBuildOptions
+
+	// Build container context tarball, provisioning containerfile in.
+	containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile))
+	tar, err := containerContext.ToTarball()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create build image context tarball: %w", err)
+	}
+	cfg.Dockerfile = "/" + containerfilePath
+
+	// Apply all given options
+	for index, opt := range opts {
+		if err := opt.Apply(&cfg); err != nil {
+			return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	resp, err := d.DockerAPI.ImageBuild(ctx, tar, cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build image: %v", err)
+	}
+
+	output, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read image build output: %w", err)
+	}
+
+	return output, nil
+}