open-nomad/drivers/docker/fingerprint.go

package docker

import (
	"context"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/plugins/drivers"
	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)
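
// Fingerprint starts the dangling container reconciler and cpuset fixer, then
// spawns a goroutine that emits driver fingerprints on the returned channel
// until the passed context or the driver's own context is cancelled.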
func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
	// Start docker reconcilers when we start fingerprinting, a workaround for
	// task drivers not having a kind of post-setup hook.
	d.danglingReconciler.Start()
	d.cpusetFixer.Start()

	ch := make(chan *drivers.Fingerprint)
	go d.handleFingerprint(ctx, ch)
	return ch, nil
}
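
// previouslyDetected reports whether the docker daemon has been successfully
// detected at least once since the driver started.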
func (d *Driver) previouslyDetected() bool {
	d.detectedLock.RLock()
	defer d.detectedLock.RUnlock()
	return d.detected
}
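
// setDetected records whether the docker daemon is currently detected,
// guarding the flag with detectedLock.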
func (d *Driver) setDetected(detected bool) {
	d.detectedLock.Lock()
	defer d.detectedLock.Unlock()
	d.detected = detected
}

// setFingerprintSuccess marks the driver as having fingerprinted successfully
func (d *Driver) setFingerprintSuccess() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(true)
	d.fingerprintLock.Unlock()
}

// setFingerprintFailure marks the driver as having failed fingerprinting
func (d *Driver) setFingerprintFailure() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(false)
	d.fingerprintLock.Unlock()
}

// fingerprintSuccessful returns true if the driver has
// never fingerprinted or has successfully fingerprinted
func (d *Driver) fingerprintSuccessful() bool {
	d.fingerprintLock.Lock()
	defer d.fingerprintLock.Unlock()
	return d.fingerprintSuccess == nil || *d.fingerprintSuccess
}
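
// handleFingerprint builds a fingerprint immediately and then on every
// fingerprintPeriod tick, sending each result on ch and closing the channel
// when either context is cancelled.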
func (d *Driver) handleFingerprint(ctx context.Context, ch chan *drivers.Fingerprint) {
	defer close(ch)

	// Start with a zero timer so the first fingerprint is emitted immediately,
	// then reset to fingerprintPeriod on each tick.
	ticker := time.NewTimer(0)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			ticker.Reset(fingerprintPeriod)
			ch <- d.buildFingerprint()
		}
	}
}
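
// buildFingerprint queries the docker daemon and returns a fingerprint
// describing its health and capabilities (version, configured limits,
// available runtimes, OS type, and bridge network IP).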
func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	fp := &drivers.Fingerprint{
		Attributes:        map[string]*pstructs.Attribute{},
		Health:            drivers.HealthStateHealthy,
		HealthDescription: drivers.DriverHealthy,
	}

	client, _, err := d.dockerClients()
	if err != nil {
		if d.fingerprintSuccessful() {
			d.logger.Info("failed to initialize client", "error", err)
		}
		d.setFingerprintFailure()
		return &drivers.Fingerprint{
			Health:            drivers.HealthStateUndetected,
			HealthDescription: "Failed to initialize docker client",
		}
	}

	env, err := client.Version()
	if err != nil {
		if d.fingerprintSuccessful() {
			d.logger.Debug("could not connect to docker daemon", "endpoint", client.Endpoint(), "error", err)
		}
		d.setFingerprintFailure()

		result := drivers.HealthStateUndetected
		if d.previouslyDetected() {
			result = drivers.HealthStateUnhealthy
		}

		return &drivers.Fingerprint{
			Health:            result,
			HealthDescription: "Failed to connect to docker daemon",
		}
	}

	d.setDetected(true)
	fp.Attributes["driver.docker"] = pstructs.NewBoolAttribute(true)
	fp.Attributes["driver.docker.version"] = pstructs.NewStringAttribute(env.Get("Version"))

	if d.config.AllowPrivileged {
		fp.Attributes["driver.docker.privileged.enabled"] = pstructs.NewBoolAttribute(true)
	}

	if d.config.PidsLimit > 0 {
		fp.Attributes["driver.docker.pids.limit"] = pstructs.NewIntAttribute(d.config.PidsLimit, "")
	}

	if d.config.Volumes.Enabled {
		fp.Attributes["driver.docker.volumes.enabled"] = pstructs.NewBoolAttribute(true)
	}

	// Expose the gateway IP of the default "bridge" network as an attribute.
	if nets, err := client.ListNetworks(); err != nil {
		d.logger.Warn("error discovering bridge IP", "error", err)
	} else {
		for _, n := range nets {
			if n.Name != "bridge" {
				continue
			}

			if len(n.IPAM.Config) == 0 {
				d.logger.Warn("no IPAM config for bridge network")
				break
			}

			if n.IPAM.Config[0].Gateway != "" {
				fp.Attributes["driver.docker.bridge_ip"] = pstructs.NewStringAttribute(n.IPAM.Config[0].Gateway)
			} else if d.fingerprintSuccess == nil {
				// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
				// See https://github.com/moby/moby/issues/32648
				d.logger.Debug("bridge_ip could not be discovered")
			}
			break
		}
	}

	if dockerInfo, err := client.Info(); err != nil {
		d.logger.Warn("failed to get Docker system info", "error", err)
	} else {
		runtimeNames := make([]string, 0, len(dockerInfo.Runtimes))
		for name := range dockerInfo.Runtimes {
			if d.config.GPURuntimeName == name {
				// Nvidia runtime is detected by Docker.
				// It makes it possible to run GPU workloads using the Docker driver on this host.
				d.gpuRuntime = true
			}
			runtimeNames = append(runtimeNames, name)
		}
		sort.Strings(runtimeNames)
fp.Attributes["driver.docker.runtimes"] = pstructs.NewStringAttribute(
strings.Join(runtimeNames, ","))
fp.Attributes["driver.docker.os_type"] = pstructs.NewStringAttribute(dockerInfo.OSType)
// If this situations arises, we are running in Windows 10 with Linux Containers enabled via VM
if runtime.GOOS == "windows" && dockerInfo.OSType == "linux" {
if d.fingerprintSuccessful() {
d.logger.Warn("Docker is configured with Linux containers; switch to Windows Containers")
}
d.setFingerprintFailure()
return &drivers.Fingerprint{
Health: drivers.HealthStateUnhealthy,
HealthDescription: "Docker is configured with Linux containers; switch to Windows Containers",
}
}
}
d.setFingerprintSuccess()
return fp
}