connect: use heuristic to detect sidecar task driver (#17065)

* connect: use heuristic to detect sidecar task driver

This PR adds a heuristic to detect whether to use the podman task driver
for the connect sidecar proxy. The podman driver will be selected if there
is at least one task in the task group configured to use podman, and there
are zero tasks in the group configured to use docker. In all other cases
the task driver defaults to docker.

After this change, we should be able to run typical Connect jobspecs
(e.g. nomad job init [-short] -connect) on clusters configured with the
podman task driver, without modification to the job files.

Closes #17042

* golf: cleanup driver detection logic
This commit is contained in:
Seth Hoenig 2023-05-05 10:19:30 -05:00 committed by GitHub
parent 6ec4a69f47
commit fff2eec625
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 95 additions and 5 deletions

3
.changelog/17065.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
connect: Auto detect when to use podman for connect sidecar proxies
```

View File

@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/hashicorp/go-set"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/helper/envoy"
"github.com/hashicorp/nomad/helper/pointer"
@ -36,6 +37,10 @@ func connectSidecarResources() *structs.Resources {
// connectSidecarDriverConfig is the driver configuration used by the injected
// connect proxy sidecar task.
//
// Note: must be compatible with both docker and podman. One could imagine passing
// in the driver name in the future and switching on that if we need specific
// configs.
func connectSidecarDriverConfig() map[string]interface{} {
return map[string]interface{}{
"image": envoy.SidecarConfigVar,
@ -230,6 +235,23 @@ func injectPort(group *structs.TaskGroup, label string) {
})
}
// groupConnectGuessTaskDriver inspects the tasks of g and picks the task
// driver to use for the injected default sidecar proxy task.
//
// The choice is "podman" only when the group contains at least one podman
// task and no docker tasks; every other combination falls back to "docker".
//
// This heuristic is skipped entirely when the jobspec supplies an explicit
// sidecar_task block, which always takes precedence.
func groupConnectGuessTaskDriver(g *structs.TaskGroup) string {
	var sawPodman, sawDocker bool
	for _, task := range g.Tasks {
		switch task.Driver {
		case "podman":
			sawPodman = true
		case "docker":
			sawDocker = true
		}
	}
	if sawPodman && !sawDocker {
		return "podman"
	}
	return "docker"
}
func groupConnectHook(job *structs.Job, g *structs.TaskGroup) error {
// Create an environment interpolator with what we have at submission time.
// This should only be used to interpolate connect service names which are
@ -254,7 +276,8 @@ func groupConnectHook(job *structs.Job, g *structs.TaskGroup) error {
// If the task doesn't already exist, create a new one and add it to the job
if task == nil {
task = newConnectSidecarTask(service.Name)
driver := groupConnectGuessTaskDriver(g)
task = newConnectSidecarTask(service.Name, driver)
// If there happens to be a task defined with the same name
// append an UUID fragment to the task name
@ -477,12 +500,12 @@ func newConnectGatewayTask(prefix, service string, netHost, customizedTls bool)
}
}
func newConnectSidecarTask(service string) *structs.Task {
func newConnectSidecarTask(service, driver string) *structs.Task {
return &structs.Task{
// Name is used in container name so must start with '[A-Za-z0-9]'
Name: fmt.Sprintf("%s-%s", structs.ConnectProxyPrefix, service),
Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, service),
Driver: "docker",
Driver: driver,
Config: connectSidecarDriverConfig(),
ShutdownDelay: 5 * time.Second,
LogConfig: &structs.LogConfig{

View File

@ -9,10 +9,12 @@ import (
"time"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)
@ -57,6 +59,68 @@ func TestJobEndpointConnect_isSidecarForService(t *testing.T) {
}
}
// TestJobEndpointConnect_groupConnectGuessTaskDriver verifies the driver
// heuristic: podman is chosen only when a group has at least one podman task
// and zero docker tasks; everything else defaults to docker.
func TestJobEndpointConnect_groupConnectGuessTaskDriver(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name    string
		drivers []string
		exp     string
	}{
		{
			name:    "none",
			drivers: nil,
			exp:     "docker",
		},
		{
			name:    "neither",
			drivers: []string{"exec", "raw_exec", "rkt"},
			exp:     "docker",
		},
		{
			name:    "docker only",
			drivers: []string{"docker"},
			exp:     "docker",
		},
		{
			name:    "podman only",
			drivers: []string{"podman"},
			exp:     "podman",
		},
		{
			name:    "mix with docker",
			drivers: []string{"podman", "docker", "exec"},
			exp:     "docker",
		},
		{
			name:    "mix without docker",
			drivers: []string{"exec", "podman", "raw_exec"},
			exp:     "podman",
		},
	}

	for _, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		// Run each case as a named subtest so a failure identifies which
		// driver combination produced the wrong result (previously the
		// name field was declared but never used).
		t.Run(tc.name, func(t *testing.T) {
			tasks := helper.ConvertSlice(tc.drivers, func(driver string) *structs.Task {
				return &structs.Task{Driver: driver}
			})
			tg := &structs.TaskGroup{Tasks: tasks}
			result := groupConnectGuessTaskDriver(tg)
			must.Eq(t, tc.exp, result)
		})
	}
}
// TestJobEndpointConnect_newConnectSidecarTask checks that the generated
// sidecar task name is prefixed with "connect-proxy-" and that the given
// driver is applied verbatim.
func TestJobEndpointConnect_newConnectSidecarTask(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		service string
		driver  string
	}{
		{service: "redis", driver: "podman"},
		{service: "db", driver: "docker"},
	}
	for _, tc := range cases {
		task := newConnectSidecarTask(tc.service, tc.driver)
		must.Eq(t, "connect-proxy-"+tc.service, task.Name)
		must.Eq(t, tc.driver, task.Driver)
	}
}
func TestJobEndpointConnect_groupConnectHook(t *testing.T) {
ci.Parallel(t)
@ -90,8 +154,8 @@ func TestJobEndpointConnect_groupConnectHook(t *testing.T) {
// Expected tasks
tgExp := job.TaskGroups[0].Copy()
tgExp.Tasks = []*structs.Task{
newConnectSidecarTask("backend"),
newConnectSidecarTask("admin"),
newConnectSidecarTask("backend", "docker"),
newConnectSidecarTask("admin", "docker"),
}
tgExp.Services[0].Name = "backend"
tgExp.Services[1].Name = "admin"