Add pki zlint tests (#17305)
* Add tests for zlint-clean CA building This test ensures that we can consistently pass ZLint's CA linting tests on a root certificate generated by Vault. In particular, nominal requirements are placed on the structure of the issuer's Subject, which we supply, and the remaining requirements pass. The one exception is we include both RFC and CA/BF BR lints in the default zlint checks; this means ECDSA P-521 (which isn't accepted by Mozilla's root store policies) is rejected, so we ignore the lints related to that. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Add options to copy to/from container, fix stopping Stopping the container takes a bit of time for some unknown reason so I've instead opted to shorten the sleep in the zlint tests to avoid consuming resources too long after the tests finish. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * Make zlint tests execute in parallel This improves the overall test time of the zlint tests, making the container build up front once (provisioning zlint), and then copying the cert into the new container image later. Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> * make fmt Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com> Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>
This commit is contained in:
parent
60bf4706e5
commit
daf29de742
|
@ -0,0 +1,188 @@
|
|||
package pki
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/vault/helper/testhelpers/docker"
|
||||
)
|
||||
|
||||
var (
	// runner is the shared docker service runner used by every zlint test in
	// this package; it is assigned once by buildZLintContainer.
	runner *docker.Runner
	// buildZLintOnce guards the one-time build of the zlint container image,
	// so parallel tests share a single (slow) image build.
	buildZLintOnce sync.Once
)
|
||||
|
||||
// buildZLintContainer builds a local docker image
// (vault_pki_zlint_validator:latest) containing the zlint binary, installed
// via `go install` on top of the upstream golang image, and initializes the
// package-level runner used to start containers from that image. Intended to
// be called exactly once via buildZLintOnce; fails the test on any
// provisioning or build error.
func buildZLintContainer(t *testing.T) {
	containerfile := `
FROM golang:latest

RUN go install github.com/zmap/zlint/v3/cmd/zlint@latest
`

	bCtx := docker.NewBuildContext()

	imageName := "vault_pki_zlint_validator"
	imageTag := "latest"

	var err error
	runner, err = docker.NewServiceRunner(docker.RunOptions{
		ImageRepo:     imageName,
		ImageTag:      imageTag,
		ContainerName: "pki_zlint",
		// We want to run sleep in the background so we're not stuck waiting
		// for the default golang container's shell to prompt for input.
		Entrypoint: []string{"sleep", "45"},
		LogConsumer: func(s string) {
			// Only surface container logs when the test has already failed,
			// to keep passing-test output quiet.
			if t.Failed() {
				t.Logf("container logs: %s", s)
			}
		},
	})
	if err != nil {
		t.Fatalf("Could not provision docker service runner: %s", err)
	}

	ctx := context.Background()
	output, err := runner.BuildImage(ctx, containerfile, bCtx,
		docker.BuildRemove(true), docker.BuildForceRemove(true),
		docker.BuildPullParent(true),
		docker.BuildTags([]string{imageName + ":" + imageTag}))
	if err != nil {
		t.Fatalf("Could not build new image: %v", err)
	}

	t.Logf("Image build output: %v", string(output))
}
|
||||
|
||||
func RunZLintContainer(t *testing.T, certificate string) []byte {
|
||||
buildZLintOnce.Do(func() {
|
||||
buildZLintContainer(t)
|
||||
})
|
||||
|
||||
// We don't actually care about the address, we just want to start the
|
||||
// container so we can run commands in it. We'd ideally like to skip this
|
||||
// step and only build a new image, but the zlint output would be
|
||||
// intermingled with container build stages, so its not that useful.
|
||||
ctr, _, _, err := runner.Start(ctx, true, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start golang container for zlint: %s", err)
|
||||
}
|
||||
|
||||
// Copy the cert into the newly running container.
|
||||
certCtx := docker.NewBuildContext()
|
||||
certCtx["cert.pem"] = docker.PathContentsFromBytes([]byte(certificate))
|
||||
if err := runner.CopyTo(ctr.ID, "/go/", certCtx); err != nil {
|
||||
t.Fatalf("Could not copy certificate into container: %v", err)
|
||||
}
|
||||
|
||||
// Run the zlint command and save the output.
|
||||
cmd := []string{"/go/bin/zlint", "/go/cert.pem"}
|
||||
stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, ctr.ID, cmd)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not run command in container: %v", err)
|
||||
}
|
||||
|
||||
if len(stderr) != 0 {
|
||||
t.Logf("Got stderr from command:\n%v\n", string(stderr))
|
||||
}
|
||||
|
||||
if retcode != 0 {
|
||||
t.Logf("Got stdout from command:\n%v\n", string(stdout))
|
||||
t.Fatalf("Got unexpected non-zero retcode from zlint: %v\n", retcode)
|
||||
}
|
||||
|
||||
// Clean up after ourselves.
|
||||
if err := runner.Stop(context.Background(), ctr.ID); err != nil {
|
||||
t.Fatalf("failed to stop container: %v", err)
|
||||
}
|
||||
|
||||
return stdout
|
||||
}
|
||||
|
||||
func RunZLintRootTest(t *testing.T, keyType string, keyBits int, usePSS bool, ignored []string) {
|
||||
b, s := createBackendWithStorage(t)
|
||||
|
||||
resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
|
||||
"common_name": "Root X1",
|
||||
"country": "US",
|
||||
"organization": "Dadgarcorp",
|
||||
"ou": "QA",
|
||||
"key_type": keyType,
|
||||
"key_bits": keyBits,
|
||||
"use_pss": usePSS,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
rootCert := resp.Data["certificate"].(string)
|
||||
|
||||
var parsed map[string]interface{}
|
||||
output := RunZLintContainer(t, rootCert)
|
||||
|
||||
if err := json.Unmarshal(output, &parsed); err != nil {
|
||||
t.Fatalf("failed to parse zlint output as JSON: %v\nOutput:\n%v\n\n", err, string(output))
|
||||
}
|
||||
|
||||
for key, rawValue := range parsed {
|
||||
value := rawValue.(map[string]interface{})
|
||||
result, ok := value["result"]
|
||||
if !ok || result == "NA" {
|
||||
continue
|
||||
}
|
||||
|
||||
if result == "error" {
|
||||
skip := false
|
||||
for _, allowedFailures := range ignored {
|
||||
if allowedFailures == key {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !skip {
|
||||
t.Fatalf("got unexpected error from test %v: %v", key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ZLintRSA2048 lints a Vault-generated RSA-2048 root with zlint.
func Test_ZLintRSA2048(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "rsa", 2048, false, nil)
}
|
||||
|
||||
// Test_ZLintRSA2048PSS lints a Vault-generated RSA-2048 root signed with
// RSA-PSS.
func Test_ZLintRSA2048PSS(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "rsa", 2048, true, nil)
}
|
||||
|
||||
// Test_ZLintRSA3072 lints a Vault-generated RSA-3072 root with zlint.
func Test_ZLintRSA3072(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "rsa", 3072, false, nil)
}
|
||||
|
||||
// Test_ZLintRSA3072PSS lints a Vault-generated RSA-3072 root signed with
// RSA-PSS.
func Test_ZLintRSA3072PSS(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "rsa", 3072, true, nil)
}
|
||||
|
||||
// Test_ZLintECDSA256 lints a Vault-generated ECDSA P-256 root with zlint.
func Test_ZLintECDSA256(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "ec", 256, false, nil)
}
|
||||
|
||||
// Test_ZLintECDSA384 lints a Vault-generated ECDSA P-384 root with zlint.
func Test_ZLintECDSA384(t *testing.T) {
	t.Parallel()
	RunZLintRootTest(t, "ec", 384, false, nil)
}
|
||||
|
||||
// Test_ZLintECDSA521 lints a Vault-generated ECDSA P-521 root with zlint,
// ignoring the Mozilla root-store-policy lints that reject P-521 keys.
func Test_ZLintECDSA521(t *testing.T) {
	t.Parallel()
	// Mozilla doesn't allow P-521 ECDSA keys.
	RunZLintRootTest(t, "ec", 521, false, []string{
		"e_mp_ecdsa_pub_key_encoding_correct",
		"e_mp_ecdsa_signature_encoding_correct",
	})
}
|
|
@ -327,12 +327,18 @@ func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*ty
|
|||
}
|
||||
|
||||
func (d *Runner) Stop(ctx context.Context, containerID string) error {
|
||||
timeout := 5 * time.Second
|
||||
if err := d.DockerAPI.ContainerStop(ctx, containerID, &timeout); err != nil {
|
||||
return err
|
||||
if d.RunOptions.NetworkID != "" {
|
||||
if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil {
|
||||
return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true)
|
||||
timeout := 5 * time.Second
|
||||
if err := d.DockerAPI.ContainerStop(ctx, containerID, &timeout); err != nil {
|
||||
return fmt.Errorf("error stopping container: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Runner) Restart(ctx context.Context, containerID string) error {
|
||||
|
@ -464,10 +470,14 @@ type PathContents interface {
|
|||
// FileContents holds the raw bytes and tar-header metadata (permission bits
// and ownership) for a single file carried in a BuildContext.
type FileContents struct {
	// Data is the raw file content.
	Data []byte
	// Mode is the Unix permission bits, as stored in a tar header.
	Mode int64
	// UID is the owning user id.
	UID int
	// GID is the owning group id.
	GID int
}
|
||||
|
||||
// UpdateHeader copies this file's mode and ownership onto the given tar
// header; Data is written separately by the archiver. Always returns nil.
func (b FileContents) UpdateHeader(header *tar.Header) error {
	header.Mode = b.Mode
	header.Uid = b.UID
	header.Gid = b.GID
	return nil
}
|
||||
|
||||
|
@ -488,6 +498,41 @@ func NewBuildContext() BuildContext {
|
|||
return BuildContext{}
|
||||
}
|
||||
|
||||
func BuildContextFromTarball(reader io.Reader) (BuildContext, error) {
|
||||
archive := tar.NewReader(reader)
|
||||
bCtx := NewBuildContext()
|
||||
|
||||
for true {
|
||||
header, err := archive.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to parse provided tarball: %v")
|
||||
}
|
||||
|
||||
data := make([]byte, int(header.Size))
|
||||
read, err := archive.Read(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse read from provided tarball: %v")
|
||||
}
|
||||
|
||||
if read != int(header.Size) {
|
||||
return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size)
|
||||
}
|
||||
|
||||
bCtx[header.Name] = FileContents{
|
||||
Data: data,
|
||||
Mode: header.Mode,
|
||||
UID: header.Uid,
|
||||
GID: header.Gid,
|
||||
}
|
||||
}
|
||||
|
||||
return bCtx, nil
|
||||
}
|
||||
|
||||
func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
|
||||
var err error
|
||||
buffer := new(bytes.Buffer)
|
||||
|
@ -609,3 +654,31 @@ func (d *Runner) BuildImage(ctx context.Context, containerfile string, container
|
|||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// CopyTo ships the given BuildContext (a map of paths to file contents) into
// the running container as a tarball, extracted under destination.
func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error {
	// XXX: currently we use the default options but we might want to allow
	// modifying cfg.CopyUIDGID in the future.
	var cfg types.CopyToContainerOptions

	// Convert our provided contents to a tarball to ship up.
	tar, err := contents.ToTarball()
	if err != nil {
		return fmt.Errorf("failed to build contents into tarball: %v", err)
	}

	return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg)
}
|
||||
|
||||
// CopyFrom reads the file or directory at source out of the running
// container, returning its contents as a BuildContext along with docker's
// stat information for the source path.
func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) {
	reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err)
	}

	// Docker hands back a tar stream; unpack it into our map representation.
	result, err := BuildContextFromTarball(reader)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to build archive from result: %v", err)
	}

	return result, &stat, nil
}
|
||||
|
|
Loading…
Reference in New Issue