Merge branch 'master' into f-docker-any-network

Chris Bednarski 2015-11-17 17:22:13 -08:00
commit 122220a424
6 changed files with 198 additions and 117 deletions

View file

@@ -41,12 +41,16 @@ IMPROVEMENTS:
 * client: Test Skip Detection [GH-221]
 * driver/docker: Advanced docker driver options [GH-390]
 * driver/docker: Docker hostname can be set [GH-426]
+<<<<<<< Updated upstream
 * driver/docker: Mount task local and alloc directory to docker containers
   [GH-290]
 * driver/docker: Pass JVM options in java driver [GH-293, GH-297]
 * drivers: Use BlkioWeight rather than BlkioThrottleReadIopsDevice [GH-222]
 * jobspec and drivers: Driver configuration supports arbitrary struct to be
   passed in jobspec [GH-415]
+=======
+* driver/docker: Docker container name can be set [GH-389]
+>>>>>>> Stashed changes
 
 BUG FIXES:

View file

@@ -1,7 +1,9 @@
 package config
 
 import (
+	"fmt"
 	"io"
+	"strconv"
 
 	"github.com/hashicorp/nomad/nomad/structs"
 )
@@ -73,3 +75,26 @@ func (c *Config) ReadDefault(id string, defaultValue string) string {
 	}
 	return defaultValue
 }
+
+// ReadBool parses the specified option as a boolean.
+func (c *Config) ReadBool(id string) (bool, error) {
+	val, ok := c.Options[id]
+	if !ok {
+		return false, fmt.Errorf("Specified config is missing from options")
+	}
+	bval, err := strconv.ParseBool(val)
+	if err != nil {
+		return false, fmt.Errorf("Failed to parse %s as bool: %s", val, err)
+	}
+	return bval, nil
+}
+
+// ReadBoolDefault tries to parse the specified option as a boolean. If there is
+// an error in parsing, the default option is returned.
+func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool {
+	val, err := c.ReadBool(id)
+	if err != nil {
+		return defaultValue
+	}
+	return val
+}

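The helpers above replace the repeated strconv.ParseBool boilerplate in the drivers (see the docker driver hunks below). A minimal, self-contained sketch of the calling pattern; the Config type here is a local stand-in for the client config package, not the real one:

```go
package main

import (
	"fmt"
	"strconv"
)

// Config is an illustrative stand-in for the client config with its
// string-keyed Options map.
type Config struct {
	Options map[string]string
}

// ReadBoolDefault mirrors the helper added above: missing keys and
// unparseable values both fall back to the supplied default.
func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool {
	val, ok := c.Options[id]
	if !ok {
		return defaultValue
	}
	b, err := strconv.ParseBool(val)
	if err != nil {
		return defaultValue
	}
	return b
}

func main() {
	c := &Config{Options: map[string]string{
		"docker.privileged.enabled": "true",
		"docker.cleanup.container":  "not-a-bool",
	}}

	fmt.Println(c.ReadBoolDefault("docker.privileged.enabled", false)) // true
	fmt.Println(c.ReadBoolDefault("docker.cleanup.container", true))   // true (bad value -> default)
	fmt.Println(c.ReadBoolDefault("docker.cleanup.image", true))       // true (missing key -> default)
}
```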
View file

@@ -35,15 +35,16 @@ type DockerAuthConfig struct {
 type DockerDriverConfig struct {
 	DockerAuthConfig
 	ImageName     string              `mapstructure:"image"`          // Container's Image Name
 	Command       string              `mapstructure:"command"`        // The Command/Entrypoint to run when the container starts up
 	Args          string              `mapstructure:"args"`           // The arguments to the Command/Entrypoint
 	NetworkMode   string              `mapstructure:"network_mode"`   // The network mode of the container - host, net and none
 	PortMap       []map[string]int    `mapstructure:"port_map"`       // A map of host port labels and the ports exposed on the container
 	Privileged    bool                `mapstructure:"privileged"`     // Flag to run the container in priviledged mode
 	DNS           string              `mapstructure:"dns_server"`     // DNS Server for containers
 	SearchDomains string              `mapstructure:"search_domains"` // DNS Search domains for containers
 	Hostname      string              `mapstructure:"hostname"`       // Hostname for containers
+	Labels        []map[string]string `mapstructure:"labels"`         // Labels to set when the container starts up
 }
 
 func (c *DockerDriverConfig) Validate() error {
@@ -54,6 +55,10 @@ func (c *DockerDriverConfig) Validate() error {
 	if len(c.PortMap) > 1 {
 		return fmt.Errorf("Only one port_map block is allowed in the docker driver config")
 	}
 
+	if len(c.Labels) > 1 {
+		return fmt.Errorf("Only one labels block is allowed in the docker driver config")
+	}
+
 	return nil
 }
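The `labels` option, like `port_map`, is typed `[]map[string]string` because a jobspec block decodes to a list containing a single map, which is what the `len(c.Labels) > 1` guard checks. A standalone sketch of that decoding, assuming github.com/mitchellh/mapstructure (implied by the struct tags above); this is not the driver's actual Start() code:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Trimmed-down copy of the driver config for illustration.
type DockerDriverConfig struct {
	ImageName string              `mapstructure:"image"`
	Labels    []map[string]string `mapstructure:"labels"`
}

func main() {
	// Assumed shape of task.Config after jobspec parsing: one labels block
	// arrives as a one-element slice of maps.
	raw := map[string]interface{}{
		"image": "redis:latest",
		"labels": []map[string]string{
			{"group": "cache", "team": "platform"},
		},
	}

	var cfg DockerDriverConfig
	if err := mapstructure.WeakDecode(raw, &cfg); err != nil {
		panic(err)
	}

	// Exactly one labels block is expected; more than one fails Validate().
	fmt.Println(cfg.ImageName, cfg.Labels[0]["group"]) // redis:latest cache
}
```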
@@ -98,26 +103,16 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
 	// Initialize docker API client
 	client, err := d.dockerClient()
 	if err != nil {
-		d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon: %s", err)
+		d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s\n", err)
 		return false, nil
 	}
 
-	privileged, err := strconv.ParseBool(d.config.ReadDefault("docker.privileged.enabled", "false"))
-	if err != nil {
-		return false, fmt.Errorf("Unable to parse docker.privileged.enabled: %s", err)
-	}
-	if privileged == true {
-		d.logger.Printf("[DEBUG] driver.docker: privileged containers enabled. Only enable if needed")
+	privileged := d.config.ReadBoolDefault("docker.privileged.enabled", false)
+	if privileged {
+		d.logger.Println("[INFO] driver.docker: privileged containers are enabled")
 		node.Attributes["docker.privileged.enabled"] = "1"
-	}
-
-	_, err = strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
-	if err != nil {
-		return false, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
-	}
-	_, err = strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
-	if err != nil {
-		return false, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
+	} else {
+		d.logger.Println("[INFO] driver.docker: privileged containers are disabled")
 	}
 
 	// This is the first operation taken on the client so we'll try to
@@ -125,7 +120,7 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
 	// Docker isn't available so we'll simply disable the docker driver.
 	env, err := client.Version()
 	if err != nil {
-		d.logger.Printf("[INFO] driver.docker: connection to daemon failed: %s", err)
+		d.logger.Printf("[INFO] driver.docker: could not connect to docker daemon at %s: %s\n", client.Endpoint(), err)
 		return false, nil
 	}
 	node.Attributes["driver.docker"] = "1"
@@ -153,8 +148,10 @@ func (d *DockerDriver) containerBinds(alloc *allocdir.AllocDir, task *structs.Ta
 func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, driverConfig *DockerDriverConfig) (docker.CreateContainerOptions, error) {
 	var c docker.CreateContainerOptions
 	if task.Resources == nil {
-		d.logger.Printf("[ERR] driver.docker: task.Resources is empty")
-		return c, fmt.Errorf("task.Resources is nil and we can't constrain resource usage. We shouldn't have been able to schedule this in the first place.")
+		// Guard against missing resources. We should never have been able to
+		// schedule a job without specifying this.
+		d.logger.Println("[ERR] driver.docker: task.Resources is empty")
+		return c, fmt.Errorf("task.Resources is empty")
 	}
 
 	binds, err := d.containerBinds(ctx.AllocDir, task)
@@ -208,43 +205,32 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 		Binds: binds,
 	}
 
-	d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Config["image"])
-	d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Config["image"])
-	d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s", hostConfig.Binds, task.Config["image"])
+	d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s\n", hostConfig.Memory, task.Config["image"])
+	d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s\n", hostConfig.CPUShares, task.Config["image"])
+	d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s\n", hostConfig.Binds, task.Config["image"])
 
 	// set privileged mode
-	hostPrivileged, err := strconv.ParseBool(d.config.ReadDefault("docker.privileged.enabled", "false"))
-	if err != nil {
-		return c, fmt.Errorf("Unable to parse docker.privileged.enabled: %s", err)
-	}
-	if driverConfig.Privileged {
-		if !hostPrivileged {
-			return c, fmt.Errorf(`Unable to set privileged flag since "docker.privileged.enabled" is false`)
-		}
-		hostConfig.Privileged = driverConfig.Privileged
+	hostPrivileged := d.config.ReadBoolDefault("docker.privileged.enabled", false)
+	if driverConfig.Privileged && !hostPrivileged {
+		return c, fmt.Errorf(`Unable to set privileged flag since "docker.privileged.enabled" is false`)
 	}
+	hostConfig.Privileged = hostPrivileged
 
 	// set DNS servers
-	dns := driverConfig.DNS
-	if dns != "" {
-		for _, v := range strings.Split(dns, ",") {
+	if driverConfig.DNS != "" {
+		for _, v := range strings.Split(driverConfig.DNS, ",") {
 			ip := strings.TrimSpace(v)
 			if net.ParseIP(ip) != nil {
 				hostConfig.DNS = append(hostConfig.DNS, ip)
 			} else {
-				d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s", ip)
+				d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s\n", ip)
 			}
 		}
 	}
 
 	// set DNS search domains
-	dnsSearch := driverConfig.SearchDomains
-	if dnsSearch != "" {
-		for _, v := range strings.Split(dnsSearch, ",") {
+	if driverConfig.SearchDomains != "" {
+		for _, v := range strings.Split(driverConfig.SearchDomains, ",") {
 			hostConfig.DNSSearch = append(hostConfig.DNSSearch, strings.TrimSpace(v))
 		}
 	}
@@ -252,13 +238,16 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 	hostConfig.NetworkMode = driverConfig.NetworkMode
 	if hostConfig.NetworkMode == "" {
 		// docker default
-		d.logger.Printf("[INFO] driver.docker: networking mode not specified; defaulting to bridge")
+		d.logger.Println("[INFO] driver.docker: networking mode not specified; defaulting to bridge")
 		hostConfig.NetworkMode = "bridge"
 	}
 
 	// Setup port mapping and exposed ports
 	if len(task.Resources.Networks) == 0 {
-		d.logger.Print("[WARN] driver.docker: No network resources are available for port mapping")
+		d.logger.Println("[DEBUG] driver.docker: No network interfaces are available")
+		if len(driverConfig.PortMap[0]) > 0 {
+			return c, fmt.Errorf("Trying to map ports but no network interface is available")
+		}
 	} else {
 		// TODO add support for more than one network
 		network := task.Resources.Networks[0]
@@ -266,11 +255,15 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 		exposedPorts := map[docker.Port]struct{}{}
 		for _, port := range network.ReservedPorts {
-			publishedPorts[docker.Port(strconv.Itoa(port.Value)+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port.Value)}}
-			publishedPorts[docker.Port(strconv.Itoa(port.Value)+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port.Value)}}
+			hostPortStr := strconv.Itoa(port.Value)
+			dockerPort := docker.Port(hostPortStr)
+
+			publishedPorts[dockerPort+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
+			publishedPorts[dockerPort+"/udp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
 			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static)\n", network.IP, port.Value, port.Value)
-			exposedPorts[docker.Port(strconv.Itoa(port.Value)+"/tcp")] = struct{}{}
-			exposedPorts[docker.Port(strconv.Itoa(port.Value)+"/udp")] = struct{}{}
+
+			exposedPorts[dockerPort+"/tcp"] = struct{}{}
+			exposedPorts[dockerPort+"/udp"] = struct{}{}
 			d.logger.Printf("[DEBUG] driver.docker: exposed port %d\n", port.Value)
 		}
@@ -280,15 +273,19 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 			if !ok {
 				containerPort = port.Value
 			}
-			cp := strconv.Itoa(containerPort)
-			hostPort := strconv.Itoa(port.Value)
-			publishedPorts[docker.Port(cp+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPort}}
-			publishedPorts[docker.Port(cp+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPort}}
-			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)", network.IP, port.Value, containerPort)
-			exposedPorts[docker.Port(cp+"/tcp")] = struct{}{}
-			exposedPorts[docker.Port(cp+"/udp")] = struct{}{}
-			d.logger.Printf("[DEBUG] driver.docker: exposed port %s\n", hostPort)
-			containerToHostPortMap[cp] = port.Value
+
+			containerPortStr := docker.Port(strconv.Itoa(containerPort))
+			hostPortStr := strconv.Itoa(port.Value)
+
+			publishedPorts[containerPortStr+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
+			publishedPorts[containerPortStr+"/udp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
+			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)\n", network.IP, port.Value, containerPort)
+
+			exposedPorts[containerPortStr+"/tcp"] = struct{}{}
+			exposedPorts[containerPortStr+"/udp"] = struct{}{}
+			d.logger.Printf("[DEBUG] driver.docker: exposed port %s\n", hostPortStr)
+
+			containerToHostPortMap[string(containerPortStr)] = port.Value
 		}
 
 		env.SetPorts(containerToHostPortMap)
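For reference, a standalone sketch of the maps the loop above ends up building for a single mapped port, here an assumed host port 22000 forwarding to container port 8080; the Port and PortBinding types are local stand-ins for the go-dockerclient types, and all values are invented for illustration:

```go
package main

import "fmt"

// Local stand-ins for docker.Port and docker.PortBinding.
type Port string

type PortBinding struct {
	HostIP   string
	HostPort string
}

func main() {
	hostIP := "10.0.0.10"              // assumed allocation IP
	hostPort := "22000"                // assumed host port from the scheduler
	containerPort := Port("8080")      // assumed port_map target

	// Host bindings published for both protocols, as in the loop above.
	publishedPorts := map[Port][]PortBinding{
		containerPort + "/tcp": {{HostIP: hostIP, HostPort: hostPort}},
		containerPort + "/udp": {{HostIP: hostIP, HostPort: hostPort}},
	}

	// Ports exposed on the container itself.
	exposedPorts := map[Port]struct{}{
		containerPort + "/tcp": {},
		containerPort + "/udp": {},
	}

	// Container-to-host mapping handed to the task environment.
	containerToHostPortMap := map[string]int{string(containerPort): 22000}

	fmt.Println(publishedPorts, exposedPorts, containerToHostPortMap)
}
```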
@@ -308,13 +305,20 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 		if driverConfig.Args != "" {
 			cmd = append(cmd, parsedArgs...)
 		}
+		d.logger.Printf("[DEBUG] driver.docker: setting container startup command to: %s\n", strings.Join(cmd, " "))
 		config.Cmd = cmd
 	} else if driverConfig.Args != "" {
-		d.logger.Println("[DEBUG] driver.docker: ignoring args because command not specified")
+		d.logger.Println("[DEBUG] driver.docker: ignoring command arguments because command is not specified")
 	}
 
+	if len(driverConfig.Labels) == 1 {
+		config.Labels = driverConfig.Labels[0]
+		d.logger.Println("[DEBUG] driver.docker: applied labels on the container")
+	}
+
 	config.Env = env.List()
 
 	return docker.CreateContainerOptions{
+		Name:       fmt.Sprintf("%s-%s", task.Name, ctx.AllocID),
 		Config:     config,
 		HostConfig: hostConfig,
 	}, nil
@@ -340,14 +344,8 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
 		return nil, fmt.Errorf("CPU limit cannot be zero")
 	}
 
-	cleanupContainer, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
-	if err != nil {
-		return nil, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
-	}
-	cleanupImage, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
-	if err != nil {
-		return nil, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
-	}
+	cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
+	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
 
 	// Initialize docker API client
 	client, err := d.dockerClient()
@@ -386,41 +384,40 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
 		err = client.PullImage(pullOptions, authOptions)
 		if err != nil {
-			d.logger.Printf("[ERR] driver.docker: pulling container %s", err)
+			d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s\n", repo, tag, err)
 			return nil, fmt.Errorf("Failed to pull `%s`: %s", image, err)
 		}
-		d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded", repo, tag)
+		d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded\n", repo, tag)
 
 		// Now that we have the image we can get the image id
 		dockerImage, err = client.InspectImage(image)
 		if err != nil {
-			d.logger.Printf("[ERR] driver.docker: getting image id for %s", image)
+			d.logger.Printf("[ERR] driver.docker: failed getting image id for %s\n", image)
 			return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
 		}
 	}
-	d.logger.Printf("[DEBUG] driver.docker: using image %s", dockerImage.ID)
-	d.logger.Printf("[INFO] driver.docker: identified image %s as %s", image, dockerImage.ID)
+	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s\n", image, dockerImage.ID)
 
 	config, err := d.createContainer(ctx, task, &driverConfig)
 	if err != nil {
-		d.logger.Printf("[ERR] driver.docker: %s", err)
-		return nil, fmt.Errorf("Failed to create container config for image %s", image)
+		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s\n", image, err)
+		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
 	}
 	// Create a container
 	container, err := client.CreateContainer(config)
 	if err != nil {
-		d.logger.Printf("[ERR] driver.docker: %s", err)
+		d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s\n", image, err)
 		return nil, fmt.Errorf("Failed to create container from image %s", image)
 	}
-	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)
+	d.logger.Printf("[INFO] driver.docker: created container %s\n", container.ID)
 
 	// Start the container
 	err = client.StartContainer(container.ID, container.HostConfig)
 	if err != nil {
-		d.logger.Printf("[ERR] driver.docker: starting container %s", container.ID)
+		d.logger.Printf("[ERR] driver.docker: starting container %s\n", container.ID)
 		return nil, fmt.Errorf("Failed to start container %s", container.ID)
 	}
-	d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
+	d.logger.Printf("[INFO] driver.docker: started container %s\n", container.ID)
 
 	// Return a driver handle
 	h := &dockerHandle{
@@ -438,23 +435,16 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
 }
 
 func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
-	cleanupContainer, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
-	if err != nil {
-		return nil, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
-	}
-	cleanupImage, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
-	if err != nil {
-		return nil, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
-	}
+	cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
+	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
 
 	// Split the handle
 	pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:"))
 	pid := &dockerPID{}
-	err = json.Unmarshal(pidBytes, pid)
-	if err != nil {
+	if err := json.Unmarshal(pidBytes, pid); err != nil {
 		return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
 	}
-	d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s", handleID)
+	d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s\n", handleID)
 
 	// Initialize docker API client
 	client, err := d.dockerClient()
@@ -505,7 +495,7 @@ func (h *dockerHandle) ID() string {
 	}
 	data, err := json.Marshal(pid)
 	if err != nil {
-		h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s", err)
+		h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s\n", err)
 	}
 	return fmt.Sprintf("DOCKER:%s", string(data))
 }
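The handle that Open() parses above is just this `DOCKER:` prefix plus the marshaled PID struct. A standalone round-trip sketch; the dockerPID stand-in below carries only the ContainerID field that the diff elsewhere relies on, the real struct may have more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Stand-in for dockerPID with only the field referenced elsewhere in the diff.
type dockerPID struct {
	ContainerID string
}

func main() {
	// Build a handle the way ID() does: a DOCKER: prefix plus JSON.
	data, _ := json.Marshal(dockerPID{ContainerID: "abc123"})
	handleID := fmt.Sprintf("DOCKER:%s", data)

	// Recover it the way Open() does.
	pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:"))
	var pid dockerPID
	if err := json.Unmarshal(pidBytes, &pid); err != nil {
		panic(err)
	}
	fmt.Println(pid.ContainerID) // abc123
}
```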
@@ -524,7 +514,7 @@ func (h *dockerHandle) Kill() error {
 	// Stop the container
 	err := h.client.StopContainer(h.containerID, 5)
 	if err != nil {
-		log.Printf("[ERR] driver.docker: failed stopping container %s", h.containerID)
+		log.Printf("[ERR] driver.docker: failed to stop container %s", h.containerID)
 		return fmt.Errorf("Failed to stop container %s: %s", h.containerID, err)
 	}
 	log.Printf("[INFO] driver.docker: stopped container %s", h.containerID)
@@ -536,7 +526,7 @@ func (h *dockerHandle) Kill() error {
 			RemoveVolumes: true,
 		})
 		if err != nil {
-			log.Printf("[ERR] driver.docker: removing container %s", h.containerID)
+			log.Printf("[ERR] driver.docker: failed to remove container %s", h.containerID)
 			return fmt.Errorf("Failed to remove container %s: %s", h.containerID, err)
 		}
 		log.Printf("[INFO] driver.docker: removed container %s", h.containerID)
@@ -559,12 +549,12 @@ func (h *dockerHandle) Kill() error {
 			}
 			inUse := len(containers)
 			if inUse > 0 {
-				log.Printf("[INFO] driver.docker: image %s is still in use by %d containers", h.imageID, inUse)
+				log.Printf("[INFO] driver.docker: image %s is still in use by %d containers\n", h.imageID, inUse)
 			} else {
 				return fmt.Errorf("Failed to remove image %s", h.imageID)
 			}
 		} else {
-			log.Printf("[INFO] driver.docker: removed image %s", h.imageID)
+			log.Printf("[INFO] driver.docker: removed image %s\n", h.imageID)
 		}
 	}
 	return nil
@@ -574,7 +564,7 @@ func (h *dockerHandle) run() {
 	// Wait for it...
 	exitCode, err := h.client.WaitContainer(h.containerID)
 	if err != nil {
-		h.logger.Printf("[ERR] driver.docker: unable to wait for %s; container already terminated", h.containerID)
+		h.logger.Printf("[ERR] driver.docker: unable to wait for %s; container already terminated\n", h.containerID)
 	}
 
 	if exitCode != 0 {

View file

@@ -1,10 +1,12 @@
 package driver
 
 import (
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"path/filepath"
 	"reflect"
+	"strings"
 	"testing"
 	"time"
@@ -433,3 +435,58 @@ func TestDockerHostNet(t *testing.T) {
 	}
 	defer handle.Kill()
 }
+
+func TestDockerLabels(t *testing.T) {
+	if !dockerIsConnected(t) {
+		t.SkipNow()
+	}
+
+	task := taskTemplate()
+	task.Config["labels"] = []map[string]string{
+		map[string]string{
+			"label1": "value1",
+			"label2": "value2",
+		},
+	}
+
+	driverCtx := testDockerDriverContext(task.Name)
+	ctx := testDriverExecContext(task, driverCtx)
+	defer ctx.AllocDir.Destroy()
+	d := NewDockerDriver(driverCtx)
+
+	handle, err := d.Start(ctx, task)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if handle == nil {
+		t.Fatalf("missing handle")
+	}
+
+	client, err := docker.NewClientFromEnv()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// don't know if is queriable in a clean way
+	parts := strings.SplitN(handle.ID(), ":", 2)
+	var pid dockerPID
+	err = json.Unmarshal([]byte(parts[1]), &pid)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	container, err := client.InspectContainer(pid.ContainerID)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if want, got := 2, len(container.Config.Labels); want != got {
+		t.Errorf("Wrong labels count for docker job. Expect: %d, got: %d", want, got)
+	}
+
+	if want, got := "value1", container.Config.Labels["label1"]; want != got {
+		t.Errorf("Wrong label value docker job. Expect: %s, got: %s", want, got)
+	}
+
+	defer handle.Kill()
+}

View file

@@ -49,13 +49,16 @@ specification:
   launching more than one of a task (using `count`) with this option set, every
   container the task starts will have the same hostname.
 
+* `labels` - (Optional) A key/value map of labels to set to the containers on start.
+
 **Authentication** Registry authentication can be set per task with the
 following authentication parameters. These options can provide access to
 private repositories that utilize the docker remote api (e.g. dockerhub,
 quay.io)
 
-- `auth.username` - (Optional) The account username
-- `auth.password` - (Optional) The account password
-- `auth.email` - (Optional) The account email
+- `auth.username` - (optional) The account username
+- `auth.password` - (optional) The account password
+- `auth.email` - (optional) The account email
 - `auth.server-address` - (Optional) The server domain/ip without the
   protocol

View file

@ -3,20 +3,22 @@ layout: "docs"
page_title: "Drivers: Rkt" page_title: "Drivers: Rkt"
sidebar_current: "docs-drivers-rkt" sidebar_current: "docs-drivers-rkt"
description: |- description: |-
The Rkt task driver is used to run application containers using Rkt. The rkt task driver is used to run application containers using rkt.
--- ---
# Rkt Driver # Rkt Driver - Experimental
Name: `rkt` Name: `rkt`
The `Rkt` driver provides an interface for using CoreOS Rkt for running The `rkt` driver provides an interface for using CoreOS rkt for running
application containers. Currently, the driver supports launching application containers. Currently, the driver supports launching
containers. containers but does not support resource isolation or dynamic ports. This can
lead to resource over commitment and port conflicts and as such, this driver is
being marked as experimental and should be used with care.
## Task Configuration ## Task Configuration
The `Rkt` driver supports the following configuration in the job spec: The `rkt` driver supports the following configuration in the job spec:
* `trust_prefix` - **(Optional)** The trust prefix to be passed to rkt. Must be reachable from * `trust_prefix` - **(Optional)** The trust prefix to be passed to rkt. Must be reachable from
the box running the nomad agent. If not specified, the image is run without the box running the nomad agent. If not specified, the image is run without
@ -28,23 +30,23 @@ hash, ACI address or docker registry.
## Task Directories ## Task Directories
The `Rkt` driver does not currently support mounting the `alloc/` and `local/` The `rkt` driver does not currently support mounting the `alloc/` and `local/`
directory. It is currently blocked by this [Rkt directory. It is currently blocked by this [rkt
issue](https://github.com/coreos/rkt/issues/761). As such the coresponding issue](https://github.com/coreos/rkt/issues/761). As such the coresponding
[environment variables](/docs/jobspec/environment.html#task_dir) are not set. [environment variables](/docs/jobspec/environment.html#task_dir) are not set.
## Client Requirements ## Client Requirements
The `Rkt` driver requires rkt to be installed and in your systems `$PATH`. The `rkt` driver requires rkt to be installed and in your systems `$PATH`.
The `trust_prefix` must be accessible by the node running Nomad. This can be an The `trust_prefix` must be accessible by the node running Nomad. This can be an
internal source, private to your cluster, but it must be reachable by the client internal source, private to your cluster, but it must be reachable by the client
over HTTP. over HTTP.
## Client Attributes ## Client Attributes
The `Rkt` driver will set the following client attributes: The `rkt` driver will set the following client attributes:
* `driver.rkt` - Set to `1` if Rkt is found on the host node. Nomad determines * `driver.rkt` - Set to `1` if rkt is found on the host node. Nomad determines
this by executing `rkt version` on the host and parsing the output this by executing `rkt version` on the host and parsing the output
* `driver.rkt.version` - Version of `rkt` eg: `0.8.1` * `driver.rkt.version` - Version of `rkt` eg: `0.8.1`
* `driver.rkt.appc.version` - Version of `appc` that `rkt` is using eg: `0.8.1` * `driver.rkt.appc.version` - Version of `appc` that `rkt` is using eg: `0.8.1`