Merge branch 'master' into f-docker-any-network

Chris Bednarski 2015-11-17 17:22:13 -08:00
commit 122220a424
6 changed files with 198 additions and 117 deletions

CHANGELOG.md

@@ -41,12 +41,13 @@ IMPROVEMENTS:
* client: Test Skip Detection [GH-221]
* driver/docker: Advanced docker driver options [GH-390]
* driver/docker: Docker hostname can be set [GH-426]
* driver/docker: Docker container name can be set [GH-389]
* driver/docker: Mount task local and alloc directory to docker containers
[GH-290]
* driver/docker: Pass JVM options in java driver [GH-293, GH-297]
* drivers: Use BlkioWeight rather than BlkioThrottleReadIopsDevice [GH-222]
* jobspec and drivers: Driver configuration supports arbitrary struct to be
passed in jobspec [GH-415]
BUG FIXES:

client/config/config.go

@@ -1,7 +1,9 @@
package config
import (
"fmt"
"io"
"strconv"
"github.com/hashicorp/nomad/nomad/structs"
)
@@ -73,3 +75,26 @@ func (c *Config) ReadDefault(id string, defaultValue string) string {
}
return defaultValue
}
// ReadBool parses the specified option as a boolean.
func (c *Config) ReadBool(id string) (bool, error) {
val, ok := c.Options[id]
if !ok {
return false, fmt.Errorf("Specified config is missing from options")
}
bval, err := strconv.ParseBool(val)
if err != nil {
return false, fmt.Errorf("Failed to parse %s as bool: %s", val, err)
}
return bval, nil
}
// ReadBoolDefault tries to parse the specified option as a boolean. If there is
// an error in parsing, the default option is returned.
func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool {
val, err := c.ReadBool(id)
if err != nil {
return defaultValue
}
return val
}
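For reference, a minimal sketch of how these helpers behave. The `Config` construction and import path are assumptions for illustration; the option names are the ones the docker driver reads later in this diff.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/client/config"
)

func main() {
	// Hypothetical client config with one option set explicitly.
	c := &config.Config{Options: map[string]string{
		"docker.privileged.enabled": "true",
	}}

	// Present option: parsed strictly, so malformed values surface as errors.
	privileged, err := c.ReadBool("docker.privileged.enabled")
	fmt.Println(privileged, err) // true <nil>

	// Absent option: the caller's default wins and no error handling is needed.
	fmt.Println(c.ReadBoolDefault("docker.cleanup.container", true)) // true
}
```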

client/driver/docker.go

@@ -44,6 +44,7 @@ type DockerDriverConfig struct {
DNS string `mapstructure:"dns_server"` // DNS Server for containers
SearchDomains string `mapstructure:"search_domains"` // DNS Search domains for containers
Hostname string `mapstructure:"hostname"` // Hostname for containers
Labels []map[string]string `mapstructure:"labels"` // Labels to set when the container starts up
}
func (c *DockerDriverConfig) Validate() error {
@@ -54,6 +55,10 @@ func (c *DockerDriverConfig) Validate() error {
if len(c.PortMap) > 1 {
return fmt.Errorf("Only one port_map block is allowed in the docker driver config")
}
if len(c.Labels) > 1 {
return fmt.Errorf("Only one labels block is allowed in the docker driver config")
}
return nil
}
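The `[]map[string]string` shape, and the single-block rule above, exist because a repeated jobspec block decodes as a list of maps. A standalone sketch under that assumption, using the mitchellh/mapstructure library implied by the struct tags; `driverConfig` here is a trimmed stand-in for `DockerDriverConfig`.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// driverConfig keeps only the field under discussion.
type driverConfig struct {
	Labels []map[string]string `mapstructure:"labels"`
}

func main() {
	// A single `labels { ... }` block arrives as a one-element list of
	// maps, which is why Validate rejects len > 1 rather than len > 0.
	raw := map[string]interface{}{
		"labels": []map[string]string{
			{"label1": "value1", "label2": "value2"},
		},
	}

	var cfg driverConfig
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Labels), cfg.Labels[0]["label1"]) // 1 value1
}
```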
@@ -98,26 +103,16 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
// Initialize docker API client
client, err := d.dockerClient()
if err != nil {
d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon: %s", err)
d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s\n", err)
return false, nil
}
privileged, err := strconv.ParseBool(d.config.ReadDefault("docker.privileged.enabled", "false"))
if err != nil {
return false, fmt.Errorf("Unable to parse docker.privileged.enabled: %s", err)
}
if privileged == true {
d.logger.Printf("[DEBUG] driver.docker: privileged containers enabled. Only enable if needed")
privileged := d.config.ReadBoolDefault("docker.privileged.enabled", false)
if privileged {
d.logger.Println("[INFO] driver.docker: privileged containers are enabled")
node.Attributes["docker.privileged.enabled"] = "1"
}
_, err = strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
if err != nil {
return false, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
}
_, err = strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
if err != nil {
return false, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
} else {
d.logger.Println("[INFO] driver.docker: privileged containers are disabled")
}
// This is the first operation taken on the client so we'll try to
@@ -125,7 +120,7 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
// Docker isn't available so we'll simply disable the docker driver.
env, err := client.Version()
if err != nil {
d.logger.Printf("[INFO] driver.docker: connection to daemon failed: %s", err)
d.logger.Printf("[INFO] driver.docker: could not connect to docker daemon at %s: %s\n", client.Endpoint(), err)
return false, nil
}
node.Attributes["driver.docker"] = "1"
@@ -153,8 +148,10 @@ func (d *DockerDriver) containerBinds(alloc *allocdir.AllocDir, task *structs.Ta
func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, driverConfig *DockerDriverConfig) (docker.CreateContainerOptions, error) {
var c docker.CreateContainerOptions
if task.Resources == nil {
d.logger.Printf("[ERR] driver.docker: task.Resources is empty")
return c, fmt.Errorf("task.Resources is nil and we can't constrain resource usage. We shouldn't have been able to schedule this in the first place.")
// Guard against missing resources. We should never have been able to
// schedule a job without specifying this.
d.logger.Println("[ERR] driver.docker: task.Resources is empty")
return c, fmt.Errorf("task.Resources is empty")
}
binds, err := d.containerBinds(ctx.AllocDir, task)
@@ -208,43 +205,32 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
Binds: binds,
}
d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Config["image"])
d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Config["image"])
d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s", hostConfig.Binds, task.Config["image"])
d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s\n", hostConfig.Memory, task.Config["image"])
d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s\n", hostConfig.CPUShares, task.Config["image"])
d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s\n", hostConfig.Binds, task.Config["image"])
// set privileged mode
hostPrivileged, err := strconv.ParseBool(d.config.ReadDefault("docker.privileged.enabled", "false"))
if err != nil {
return c, fmt.Errorf("Unable to parse docker.privileged.enabled: %s", err)
}
if driverConfig.Privileged {
if !hostPrivileged {
hostPrivileged := d.config.ReadBoolDefault("docker.privileged.enabled", false)
if driverConfig.Privileged && !hostPrivileged {
return c, fmt.Errorf(`Unable to set privileged flag since "docker.privileged.enabled" is false`)
}
hostConfig.Privileged = driverConfig.Privileged
}
hostConfig.Privileged = hostPrivileged
// set DNS servers
dns := driverConfig.DNS
if dns != "" {
for _, v := range strings.Split(dns, ",") {
if driverConfig.DNS != "" {
for _, v := range strings.Split(driverConfig.DNS, ",") {
ip := strings.TrimSpace(v)
if net.ParseIP(ip) != nil {
hostConfig.DNS = append(hostConfig.DNS, ip)
} else {
d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s", ip)
d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s\n", ip)
}
}
}
// set DNS search domains
dnsSearch := driverConfig.SearchDomains
if dnsSearch != "" {
for _, v := range strings.Split(dnsSearch, ",") {
if driverConfig.SearchDomains != "" {
for _, v := range strings.Split(driverConfig.SearchDomains, ",") {
hostConfig.DNSSearch = append(hostConfig.DNSSearch, strings.TrimSpace(v))
}
}
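The dns_server handling above reduces to split, trim, and validate. A self-contained sketch of that loop with illustrative values:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Comma-separated list, as the dns_server option might supply it.
	dns := "8.8.8.8, 8.8.4.4, not-an-ip"

	var servers []string
	for _, v := range strings.Split(dns, ",") {
		ip := strings.TrimSpace(v)
		if net.ParseIP(ip) != nil {
			servers = append(servers, ip)
		} else {
			// Invalid entries are logged and skipped, not fatal.
			fmt.Printf("invalid ip address for container dns server: %s\n", ip)
		}
	}
	fmt.Println(servers) // [8.8.8.8 8.8.4.4]
}
```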
@@ -252,13 +238,16 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
hostConfig.NetworkMode = driverConfig.NetworkMode
if hostConfig.NetworkMode == "" {
// docker default
d.logger.Printf("[INFO] driver.docker: networking mode not specified; defaulting to bridge")
d.logger.Println("[INFO] driver.docker: networking mode not specified; defaulting to bridge")
hostConfig.NetworkMode = "bridge"
}
// Setup port mapping and exposed ports
if len(task.Resources.Networks) == 0 {
d.logger.Print("[WARN] driver.docker: No network resources are available for port mapping")
d.logger.Println("[DEBUG] driver.docker: No network interfaces are available")
// port_map is optional, so guard the index before dereferencing.
if len(driverConfig.PortMap) > 0 && len(driverConfig.PortMap[0]) > 0 {
return c, fmt.Errorf("Trying to map ports but no network interface is available")
}
} else {
// TODO add support for more than one network
network := task.Resources.Networks[0]
@@ -266,11 +255,15 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
exposedPorts := map[docker.Port]struct{}{}
for _, port := range network.ReservedPorts {
publishedPorts[docker.Port(strconv.Itoa(port.Value)+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port.Value)}}
publishedPorts[docker.Port(strconv.Itoa(port.Value)+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port.Value)}}
hostPortStr := strconv.Itoa(port.Value)
dockerPort := docker.Port(hostPortStr)
publishedPorts[dockerPort+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
publishedPorts[dockerPort+"/udp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static)\n", network.IP, port.Value, port.Value)
exposedPorts[docker.Port(strconv.Itoa(port.Value)+"/tcp")] = struct{}{}
exposedPorts[docker.Port(strconv.Itoa(port.Value)+"/udp")] = struct{}{}
exposedPorts[dockerPort+"/tcp"] = struct{}{}
exposedPorts[dockerPort+"/udp"] = struct{}{}
d.logger.Printf("[DEBUG] driver.docker: exposed port %d\n", port.Value)
}
@@ -280,15 +273,19 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
if !ok {
containerPort = port.Value
}
cp := strconv.Itoa(containerPort)
hostPort := strconv.Itoa(port.Value)
publishedPorts[docker.Port(cp+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPort}}
publishedPorts[docker.Port(cp+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPort}}
d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)", network.IP, port.Value, containerPort)
exposedPorts[docker.Port(cp+"/tcp")] = struct{}{}
exposedPorts[docker.Port(cp+"/udp")] = struct{}{}
d.logger.Printf("[DEBUG] driver.docker: exposed port %s\n", hostPort)
containerToHostPortMap[cp] = port.Value
containerPortStr := docker.Port(strconv.Itoa(containerPort))
hostPortStr := strconv.Itoa(port.Value)
publishedPorts[containerPortStr+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
publishedPorts[containerPortStr+"/udp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)\n", network.IP, port.Value, containerPort)
exposedPorts[containerPortStr+"/tcp"] = struct{}{}
exposedPorts[containerPortStr+"/udp"] = struct{}{}
d.logger.Printf("[DEBUG] driver.docker: exposed port %s\n", hostPortStr)
containerToHostPortMap[string(containerPortStr)] = port.Value
}
env.SetPorts(containerToHostPortMap)
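To make the two cases concrete, a sketch of the maps this loop would produce for one static port (8080) and one mapped port (host 25460 to container 6379). Addresses and ports are illustrative; the types assume the fsouza/go-dockerclient package that the `docker.` identifiers here come from.

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Static port: the reserved host port binds straight through.
	// Mapped port: port_map forwards host 25460 to container 6379.
	publishedPorts := map[docker.Port][]docker.PortBinding{
		"8080/tcp": {{HostIP: "10.0.0.1", HostPort: "8080"}},
		"8080/udp": {{HostIP: "10.0.0.1", HostPort: "8080"}},
		"6379/tcp": {{HostIP: "10.0.0.1", HostPort: "25460"}},
		"6379/udp": {{HostIP: "10.0.0.1", HostPort: "25460"}},
	}
	// Every published port is also exposed, for tcp and udp alike.
	exposedPorts := map[docker.Port]struct{}{
		"8080/tcp": {}, "8080/udp": {},
		"6379/tcp": {}, "6379/udp": {},
	}
	fmt.Println(len(publishedPorts), len(exposedPorts)) // 4 4
}
```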
@@ -308,13 +305,20 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
if driverConfig.Args != "" {
cmd = append(cmd, parsedArgs...)
}
d.logger.Printf("[DEBUG] driver.docker: setting container startup command to: %s\n", strings.Join(cmd, " "))
config.Cmd = cmd
} else if driverConfig.Args != "" {
d.logger.Println("[DEBUG] driver.docker: ignoring args because command not specified")
d.logger.Println("[DEBUG] driver.docker: ignoring command arguments because command is not specified")
}
if len(driverConfig.Labels) == 1 {
config.Labels = driverConfig.Labels[0]
d.logger.Println("[DEBUG] driver.docker: applied labels on the container")
}
config.Env = env.List()
return docker.CreateContainerOptions{
Name: fmt.Sprintf("%s-%s", task.Name, ctx.AllocID),
Config: config,
HostConfig: hostConfig,
}, nil
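Since `docker.Config.Labels` is a plain `map[string]string`, applying the single decoded labels block is one assignment. A minimal sketch with illustrative values:

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Exactly one decoded labels block, as enforced by Validate above.
	labels := []map[string]string{{"label1": "value1", "label2": "value2"}}

	config := &docker.Config{}
	if len(labels) == 1 {
		config.Labels = labels[0]
	}
	fmt.Println(config.Labels["label1"]) // value1
}
```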
@@ -340,14 +344,8 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
return nil, fmt.Errorf("CPU limit cannot be zero")
}
cleanupContainer, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
if err != nil {
return nil, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
}
cleanupImage, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
if err != nil {
return nil, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
}
cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
// Initialize docker API client
client, err := d.dockerClient()
@@ -386,41 +384,40 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
err = client.PullImage(pullOptions, authOptions)
if err != nil {
d.logger.Printf("[ERR] driver.docker: pulling container %s", err)
d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s\n", repo, tag, err)
return nil, fmt.Errorf("Failed to pull `%s`: %s", image, err)
}
d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded", repo, tag)
d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded\n", repo, tag)
// Now that we have the image we can get the image id
dockerImage, err = client.InspectImage(image)
if err != nil {
d.logger.Printf("[ERR] driver.docker: getting image id for %s", image)
d.logger.Printf("[ERR] driver.docker: failed getting image id for %s\n", image)
return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
}
}
d.logger.Printf("[DEBUG] driver.docker: using image %s", dockerImage.ID)
d.logger.Printf("[INFO] driver.docker: identified image %s as %s", image, dockerImage.ID)
d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s\n", image, dockerImage.ID)
config, err := d.createContainer(ctx, task, &driverConfig)
if err != nil {
d.logger.Printf("[ERR] driver.docker: %s", err)
return nil, fmt.Errorf("Failed to create container config for image %s", image)
d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s\n", image, err)
return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
}
// Create a container
container, err := client.CreateContainer(config)
if err != nil {
d.logger.Printf("[ERR] driver.docker: %s", err)
d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s\n", image, err)
return nil, fmt.Errorf("Failed to create container from image %s", image)
}
d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)
d.logger.Printf("[INFO] driver.docker: created container %s\n", container.ID)
// Start the container
err = client.StartContainer(container.ID, container.HostConfig)
if err != nil {
d.logger.Printf("[ERR] driver.docker: starting container %s", container.ID)
d.logger.Printf("[ERR] driver.docker: starting container %s\n", container.ID)
return nil, fmt.Errorf("Failed to start container %s", container.ID)
}
d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
d.logger.Printf("[INFO] driver.docker: started container %s\n", container.ID)
// Return a driver handle
h := &dockerHandle{
@@ -438,23 +435,16 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
}
func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
cleanupContainer, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.container", "true"))
if err != nil {
return nil, fmt.Errorf("Unable to parse docker.cleanup.container: %s", err)
}
cleanupImage, err := strconv.ParseBool(d.config.ReadDefault("docker.cleanup.image", "true"))
if err != nil {
return nil, fmt.Errorf("Unable to parse docker.cleanup.image: %s", err)
}
cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)
// Split the handle
pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:"))
pid := &dockerPID{}
err = json.Unmarshal(pidBytes, pid)
if err != nil {
if err := json.Unmarshal(pidBytes, pid); err != nil {
return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
}
d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s", handleID)
d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s\n", handleID)
// Initialize docker API client
client, err := d.dockerClient()
@@ -505,7 +495,7 @@ func (h *dockerHandle) ID() string {
}
data, err := json.Marshal(pid)
if err != nil {
h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s", err)
h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s\n", err)
}
return fmt.Sprintf("DOCKER:%s", string(data))
}
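A handle therefore round-trips as the literal `DOCKER:` prefix plus JSON, which is exactly what `Open` above strips and unmarshals. A parsing sketch; this `dockerPID` is a trimmed stand-in for the driver's struct, and unknown JSON fields are simply ignored.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// dockerPID stands in for the driver's type; only the field the labels
// test below relies on is kept.
type dockerPID struct {
	ContainerID string
}

func main() {
	handleID := `DOCKER:{"ContainerID":"deadbeef"}`

	var pid dockerPID
	raw := strings.TrimPrefix(handleID, "DOCKER:")
	if err := json.Unmarshal([]byte(raw), &pid); err != nil {
		panic(err)
	}
	fmt.Println(pid.ContainerID) // deadbeef
}
```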
@@ -524,7 +514,7 @@ func (h *dockerHandle) Kill() error {
// Stop the container
err := h.client.StopContainer(h.containerID, 5)
if err != nil {
log.Printf("[ERR] driver.docker: failed stopping container %s", h.containerID)
log.Printf("[ERR] driver.docker: failed to stop container %s", h.containerID)
return fmt.Errorf("Failed to stop container %s: %s", h.containerID, err)
}
log.Printf("[INFO] driver.docker: stopped container %s", h.containerID)
@@ -536,7 +526,7 @@ func (h *dockerHandle) Kill() error {
RemoveVolumes: true,
})
if err != nil {
log.Printf("[ERR] driver.docker: removing container %s", h.containerID)
log.Printf("[ERR] driver.docker: failed to remove container %s", h.containerID)
return fmt.Errorf("Failed to remove container %s: %s", h.containerID, err)
}
log.Printf("[INFO] driver.docker: removed container %s", h.containerID)
@@ -559,12 +549,12 @@ func (h *dockerHandle) Kill() error {
}
inUse := len(containers)
if inUse > 0 {
log.Printf("[INFO] driver.docker: image %s is still in use by %d containers", h.imageID, inUse)
log.Printf("[INFO] driver.docker: image %s is still in use by %d containers\n", h.imageID, inUse)
} else {
return fmt.Errorf("Failed to remove image %s", h.imageID)
}
} else {
log.Printf("[INFO] driver.docker: removed image %s", h.imageID)
log.Printf("[INFO] driver.docker: removed image %s\n", h.imageID)
}
}
return nil
@@ -574,7 +564,7 @@ func (h *dockerHandle) run() {
// Wait for it...
exitCode, err := h.client.WaitContainer(h.containerID)
if err != nil {
h.logger.Printf("[ERR] driver.docker: unable to wait for %s; container already terminated", h.containerID)
h.logger.Printf("[ERR] driver.docker: unable to wait for %s; container already terminated\n", h.containerID)
}
if exitCode != 0 {

client/driver/docker_test.go

@@ -1,10 +1,12 @@
package driver
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
@@ -433,3 +435,58 @@ func TestDockerHostNet(t *testing.T) {
}
defer handle.Kill()
}
func TestDockerLabels(t *testing.T) {
if !dockerIsConnected(t) {
t.SkipNow()
}
task := taskTemplate()
task.Config["labels"] = []map[string]string{
map[string]string{
"label1": "value1",
"label2": "value2",
},
}
driverCtx := testDockerDriverContext(task.Name)
ctx := testDriverExecContext(task, driverCtx)
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
handle, err := d.Start(ctx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
if handle == nil {
t.Fatalf("missing handle")
}
client, err := docker.NewClientFromEnv()
if err != nil {
t.Fatalf("err: %v", err)
}
// The container ID isn't queryable in a clean way, so parse it out of the handle ID.
parts := strings.SplitN(handle.ID(), ":", 2)
var pid dockerPID
err = json.Unmarshal([]byte(parts[1]), &pid)
if err != nil {
t.Fatalf("err: %v", err)
}
container, err := client.InspectContainer(pid.ContainerID)
if err != nil {
t.Fatalf("err: %v", err)
}
if want, got := 2, len(container.Config.Labels); want != got {
t.Errorf("Wrong label count for docker job. Expected: %d, got: %d", want, got)
}
if want, got := "value1", container.Config.Labels["label1"]; want != got {
t.Errorf("Wrong label value for docker job. Expected: %s, got: %s", want, got)
}
defer handle.Kill()
}

website/source/docs/drivers/docker.html.md

@@ -49,13 +49,16 @@ specification:
launching more than one of a task (using `count`) with this option set, every
container the task starts will have the same hostname.
* `labels` - (Optional) A key/value map of labels to apply to the container when it starts.
**Authentication** Registry authentication can be set per task with the
following authentication parameters. These options can provide access to
private repositories that utilize the Docker Remote API (e.g. Docker Hub,
quay.io).
- `auth.username` - (Optional) The account username
- `auth.password` - (Optional) The account password
- `auth.email` - (Optional) The account email
- `auth.username` - (optional) The account username
- `auth.password` - (optional) The account password
- `auth.email` - (optional) The account email
- `auth.server-address` - (Optional) The server domain/ip without the
protocol

website/source/docs/drivers/rkt.html.md

@@ -3,20 +3,22 @@ layout: "docs"
page_title: "Drivers: Rkt"
sidebar_current: "docs-drivers-rkt"
description: |-
The Rkt task driver is used to run application containers using Rkt.
The rkt task driver is used to run application containers using rkt.
---
# Rkt Driver
# Rkt Driver - Experimental
Name: `rkt`
The `Rkt` driver provides an interface for using CoreOS Rkt for running
The `rkt` driver provides an interface for using CoreOS rkt for running
application containers. Currently, the driver supports launching
containers.
containers but does not support resource isolation or dynamic ports. This can
lead to resource overcommitment and port conflicts; as such, this driver is
marked as experimental and should be used with care.
## Task Configuration
The `Rkt` driver supports the following configuration in the job spec:
The `rkt` driver supports the following configuration in the job spec:
* `trust_prefix` - **(Optional)** The trust prefix to be passed to rkt. Must be reachable from
the box running the Nomad agent. If not specified, the image is run without
@@ -28,23 +30,23 @@ hash, ACI address or docker registry.
## Task Directories
The `Rkt` driver does not currently support mounting the `alloc/` and `local/`
directory. It is currently blocked by this [Rkt
The `rkt` driver does not currently support mounting the `alloc/` and `local/`
directories. It is currently blocked by this [rkt
issue](https://github.com/coreos/rkt/issues/761). As such, the corresponding
[environment variables](/docs/jobspec/environment.html#task_dir) are not set.
## Client Requirements
The `Rkt` driver requires rkt to be installed and in your systems `$PATH`.
The `rkt` driver requires rkt to be installed and in your system's `$PATH`.
The `trust_prefix` must be accessible by the node running Nomad. This can be an
internal source, private to your cluster, but it must be reachable by the client
over HTTP.
## Client Attributes
The `Rkt` driver will set the following client attributes:
The `rkt` driver will set the following client attributes:
* `driver.rkt` - Set to `1` if Rkt is found on the host node. Nomad determines
* `driver.rkt` - Set to `1` if rkt is found on the host node. Nomad determines
this by executing `rkt version` on the host and parsing the output.
* `driver.rkt.version` - Version of `rkt`, e.g. `0.8.1`
* `driver.rkt.appc.version` - Version of `appc` that `rkt` is using, e.g. `0.8.1`