package taskrunner

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)
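
// volumeHook is a task runner prestart hook that resolves the task's
// requested host and CSI volumes into driver mount configurations.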
type volumeHook struct {
	alloc   *structs.Allocation
	runner  *TaskRunner
	logger  log.Logger
	taskEnv *taskenv.TaskEnv
}
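
// newVolumeHook returns a volumeHook for the given task runner, with a logger
// named after the hook.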
func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
	h := &volumeHook{
		alloc:  runner.Alloc(),
		runner: runner,
	}
	h.logger = logger.Named(h.Name())
	return h
}
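
// Name returns the name of this hook.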
func (*volumeHook) Name() string {
	return "volumes"
}
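
// validateHostVolumes checks that every requested host volume exists in the
// client's host volume configuration and returns a multierror for any that
// are missing. Non-host volume requests are ignored.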
func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) error {
	var result error

	for _, req := range requestedByAlias {
		// This is a defensive check, but this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		_, ok := clientVolumesByName[req.Source]
		if !ok {
			result = multierror.Append(result, fmt.Errorf("missing %s", req.Source))
		}
	}

	return result
}

// hostVolumeMountConfigurations takes the user's requested volume mounts,
// volumes, and the client host volume configuration and converts them into a
// format that can be used by drivers.
func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeMount, taskVolumesByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
	var mounts []*drivers.MountConfig
	for _, m := range taskMounts {
		req, ok := taskVolumesByAlias[m.Volume]
		if !ok {
			// This function receives only the task volumes that are of type Host;
			// if we can't find a group volume then we assume the mount is for
			// another type.
			continue
		}

		// This is a defensive check, but this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		hostVolume, ok := clientVolumesByName[req.Source]
		if !ok {
			// Should never happen unless the client volumes were mutated during
			// the execution of this hook.
			return nil, fmt.Errorf("No host volume named: %s", req.Source)
		}

		mcfg := &drivers.MountConfig{
			HostPath: hostVolume.Path,
			TaskPath: m.Destination,
			Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly,
		}
		mounts = append(mounts, mcfg)
	}

	return mounts, nil
}

// partitionVolumesByType takes a map of volume-alias to volume-request and
// returns them in the form of volume-type:(volume-alias:volume-request).
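// For example (illustrative aliases only):
//
//	{"data": {Type: "host"}, "cache": {Type: "csi"}}
//
// is partitioned into:
//
//	{"host": {"data": ...}, "csi": {"cache": ...}}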
func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map[string]*structs.VolumeRequest {
	result := make(map[string]map[string]*structs.VolumeRequest)
	for name, req := range xs {
		txs, ok := result[req.Type]
		if !ok {
			txs = make(map[string]*structs.VolumeRequest)
			result[req.Type] = txs
		}
		txs[name] = req
	}

	return result
}
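
// prepareHostVolumes validates the requested host volumes against the
// client's host volume configuration and converts them into driver mount
// configs.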
func (h *volumeHook) prepareHostVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	hostVolumes := h.runner.clientConfig.Node.HostVolumes

	// Always validate volumes to ensure that we do not allow volumes to be used
	// if a host is restarted and loses the host volume configuration.
	if err := validateHostVolumes(volumes, hostVolumes); err != nil {
		h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes)
		return nil, fmt.Errorf("host volume validation error: %v", err)
	}

	hostVolumeMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
	if err != nil {
		h.logger.Error("Failed to generate host volume mounts", "error", err)
		return nil, err
	}

	return hostVolumeMounts, nil
}

// partitionMountsByVolume takes a list of volume mounts and returns them in the
// form of volume-alias:[]volume-mount because one volume may be mounted multiple
// times.
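// For example, two mounts of a volume aliased "data" (illustrative alias) are
// grouped as {"data": [<mount>, <mount>]}.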
func partitionMountsByVolume(xs []*structs.VolumeMount) map[string][]*structs.VolumeMount {
	result := make(map[string][]*structs.VolumeMount)
	for _, mount := range xs {
		result[mount.Volume] = append(result[mount.Volume], mount)
	}

	return result
}
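
// prepareCSIVolumes converts the requested CSI volumes into driver mount
// configs, using the CSI mount points recorded in the allocation hook
// resources.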
func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	if len(volumes) == 0 {
		return nil, nil
	}

	var mounts []*drivers.MountConfig

	mountRequests := partitionMountsByVolume(req.Task.VolumeMounts)
	csiMountPoints := h.runner.allocHookResources.GetCSIMounts()
	for alias, request := range volumes {
		mountsForAlias, ok := mountRequests[alias]
		if !ok {
			// This task doesn't use the volume
			continue
		}

		csiMountPoint, ok := csiMountPoints[alias]
		if !ok {
			return nil, fmt.Errorf("No CSI Mount Point found for volume: %s", alias)
		}

		for _, m := range mountsForAlias {
			mcfg := &drivers.MountConfig{
				HostPath: csiMountPoint.Source,
				TaskPath: m.Destination,
				Readonly: request.ReadOnly || m.ReadOnly,
			}
			mounts = append(mounts, mcfg)
		}
	}

	return mounts, nil
}
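
// Prestart interpolates the task's volume mounts, builds mount configurations
// for the requested host and CSI volumes, and registers any mounts that are
// not already present in the task runner's hook resources.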
func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
	h.taskEnv = req.TaskEnv
	interpolateVolumeMounts(req.Task.VolumeMounts, h.taskEnv)

	volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes)

	hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost])
	if err != nil {
		return err
	}

	csiVolumeMounts, err := h.prepareCSIVolumes(req, volumes[structs.VolumeTypeCSI])
	if err != nil {
		return err
	}

	// Because this hook is also run on restores, we only add mounts that do not
	// already exist. Although this loop is somewhat expensive, there are only
	// a small number of mounts within most individual tasks. We may want to
	// revisit this using a `hookdata` param to make it "mount only once".
	mounts := h.runner.hookResources.getMounts()
	for _, m := range hostVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	for _, m := range csiVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	h.runner.hookResources.setMounts(mounts)

	return nil
}
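
// interpolateVolumeMounts expands task environment variables in each mount's
// volume alias, destination, and propagation mode in place.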
func interpolateVolumeMounts(mounts []*structs.VolumeMount, taskEnv *taskenv.TaskEnv) {
	for _, mount := range mounts {
		mount.Volume = taskEnv.ReplaceEnv(mount.Volume)
		mount.Destination = taskEnv.ReplaceEnv(mount.Destination)
		mount.PropagationMode = taskEnv.ReplaceEnv(mount.PropagationMode)
	}
}