Demo: NFS CSI Plugins (#16875)

Demo (and easily reproduce, locally) a CSI setup with separate controller
and node plugins. This runs NFS in a container backed by a host volume,
with CSI controller and node plugins from rocketDuck:
https://gitlab.com/rocketduck/csi-plugin-nfs

Co-authored-by: Florian Apolloner <florian@apolloner.eu>
Co-authored-by: Tim Gross <tgross@hashicorp.com>
# NFS plugins demo

As easy* as `../hostpath` to run locally, but with separate Controller and
Node plugins from
[rocketDuck](https://gitlab.com/rocketduck/csi-plugin-nfs).

It is backed by the NFS test server container
[atlassian/nfs-server-test](https://hub.docker.com/r/atlassian/nfs-server-test)
for easy setup.

## Overview

This is the general arrangement on a single node.

```mermaid
sequenceDiagram
  participant host machine
  participant nfs server
  participant controller plugin
  participant node plugin
  participant web server
  host machine->>nfs server: /srv/host-nfs host volume
  nfs server->>nfs server: export /srv/nfs
  controller plugin->>nfs server: create csi volume<br/>/srv/nfs/csi-nfs
  node plugin->>host machine: mount nfs server:/srv/nfs/csi-nfs into web alloc dir
  web server->>nfs server: read/write to /alloc/web-nfs
```

## Usage

### Setup Nomad

Run on Linux, as provided by this repo's root Vagrantfile:

```
vagrant up linux
```
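
Then get a shell in the VM (assuming the usual Vagrant workflow; the
machine name `linux` comes from the Vagrantfile):

```
vagrant ssh linux
```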

Create a dir on the host that we will serve NFS from:

```
sudo mkdir -p /srv/host-nfs
```

Run a Nomad agent using the `agent.hcl` in this directory:

```
sudo nomad agent -config=agent.hcl
```

You need that agent config to provide the host volume used by the NFS server,
and to allow Docker privileged mode.
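
Specifically, these are the relevant stanzas from `agent.hcl` (the full file
is included below):

```
# the host volume that backs the NFS server's exported directory
client {
  enabled = true
  host_volume "host-nfs" {
    path = "/srv/host-nfs"
  }
}

# the NFS server and CSI plugin containers all run privileged
plugin "docker" {
  config {
    allow_privileged = true
  }
}
```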

### Job setup

The setup script runs all the things for the demo:

```
./setup.sh
```
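
Once it finishes, the CSI plugin should be healthy. You can check the same
way the script does:

```
nomad plugin status rocketduck-nfs
```

`setup.sh` polls this output for the `Controllers Healthy` and
`Nodes Healthy` counts before moving on to each next step.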

### Demo web servers

On the host machine (or elsewhere, if you have the ports open), a couple of
copies of a web server show the date stamp from the time of their first launch.

You can get the assigned ports by checking the service:

```
nomad service info web
```
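
Or, to print just the port numbers, use the same Go template that `setup.sh`
runs at the end:

```
nomad service info -t '{{ range . }}{{ .Port }} {{ end }}' web
```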

Then curl to see the output, e.g. from the host:

```
$ curl localhost:29291
hello from Wed Apr 12 23:18:01 UTC 2023
```

The web index is stored in NFS, so the same date will be shown by multiple web
instances, across restarts, reschedules, and full stops and re-runs. The file
persists until the volume is deleted, either manually or during the following
clean-up.
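
The same file is also visible from the host, through the host volume that
backs the NFS server (the path is noted in a comment in `jobs/web.nomad.hcl`):

```
cat /srv/host-nfs/csi-nfs/index.html
```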

### Clean up

`./teardown.sh` deletes all the things created during Job setup.

It does not delete the Nomad data dir from `/tmp/nomad`,
nor `/srv/host-nfs`.
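
If you want a fully clean slate after stopping the agent, remove those
manually (this deletes all demo state, so only do it when you're done):

```
sudo rm -rf /tmp/nomad /srv/host-nfs
```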

---

**agent.hcl**

```
data_dir = "/tmp/nomad/data"

server {
  enabled = true

  bootstrap_expect = 1
}

client {
  enabled = true
  host_volume "host-nfs" {
    path = "/srv/host-nfs"
  }
}

plugin "docker" {
  config {
    allow_privileged = true
  }
}
```

---

**jobs/controller-plugin.nomad.hcl**

```
# Controller plugins create and manage CSI volumes.
# This one just creates folders within the NFS mount.
job "controller" {
  group "controller" {
    # count = 2 # usually you want a couple controllers for redundancy
    task "controller" {
      driver = "docker"
      csi_plugin {
        id   = "rocketduck-nfs"
        type = "controller"
      }
      config {
        # thanks rocketDuck for aiming directly at Nomad :)
        # https://gitlab.com/rocketduck/csi-plugin-nfs
        image = "registry.gitlab.com/rocketduck/csi-plugin-nfs:0.6.1"
        args = [
          "--type=controller",
          "--endpoint=${CSI_ENDPOINT}", # provided by csi_plugin{}
          "--node-id=${attr.unique.hostname}",
          "--nfs-server=${NFS_ADDRESS}:/srv/nfs",
          "--log-level=DEBUG",
        ]
        privileged = true # this particular controller mounts NFS in itself
      }
      template {
        data        = "NFS_ADDRESS={{- range nomadService `nfs` }}{{ .Address }}{{ end -}}"
        destination = "local/nfs.addy"
        env         = true
      }
    }
  }
}
```
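
The `template` stanza looks up the `nfs` Nomad service and exposes its
address to the task as the `NFS_ADDRESS` environment variable, which the
`--nfs-server` flag then interpolates. The rendered `local/nfs.addy` ends up
looking something like this (address purely illustrative):

```
NFS_ADDRESS=192.168.56.10
```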

---

**jobs/nfs.nomad.hcl**

```
# A test NFS server that serves a host volume for persistent state.
job "nfs" {
  group "nfs" {
    service {
      name     = "nfs"
      port     = "nfs"
      provider = "nomad"
    }
    network {
      port "nfs" {
        to     = 2049
        static = 2049
      }
    }
    volume "host-nfs" {
      type   = "host"
      source = "host-nfs"
    }
    task "nfs" {
      driver = "docker"
      config {
        image      = "atlassian/nfs-server-test:2.1"
        ports      = ["nfs"]
        privileged = true
      }
      env {
        # this is the container's default, but being explicit is nice.
        EXPORT_PATH = "/srv/nfs"
      }
      volume_mount {
        volume      = "host-nfs"
        destination = "/srv/nfs"
      }
    }
  }
}
```
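
With this job running, you can sanity-check the export from the host,
assuming your distro's NFS client utilities (which provide `showmount`)
are installed:

```
showmount -e localhost
```

It should list the `/srv/nfs` export.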

---

**jobs/node-plugin.nomad.hcl**

```
# Node plugins mount the volume on the host to present to other tasks.
job "node" {
  # node plugins should run anywhere your task might be placed, i.e. ~everywhere
  type = "system"

  group "node" {
    task "node" {
      driver = "docker"
      csi_plugin {
        id   = "rocketduck-nfs"
        type = "node"
      }
      config {
        # thanks rocketDuck for aiming directly at Nomad :)
        # https://gitlab.com/rocketduck/csi-plugin-nfs
        image = "registry.gitlab.com/rocketduck/csi-plugin-nfs:0.6.1"
        args = [
          "--type=node",
          "--endpoint=${CSI_ENDPOINT}", # provided by csi_plugin{}
          "--node-id=${attr.unique.hostname}",
          "--nfs-server=${NFS_ADDRESS}:/srv/nfs",
          "--log-level=DEBUG",
        ]
        # node plugins are always privileged to mount disks.
        privileged = true
        # host networking is required for NFS mounts to keep working
        # in dependent tasks across restarts of this node plugin.
        network_mode = "host"
      }
      template {
        data        = "NFS_ADDRESS={{- range nomadService `nfs` }}{{ .Address }}{{ end -}}"
        destination = "local/nfs.addy"
        env         = true
      }
    }
  }
}
```
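
Once this node plugin mounts the volume for a dependent task, the NFS mounts
are visible on the host, as the comment in `jobs/web.nomad.hcl` points out:

```
grep csi-nfs /proc/mounts
```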

---

**jobs/web.nomad.hcl**

```
# Serve the contents of our CSI volume with a little web server.
job "web" {
  group "web" {
    count = 2

    # request the volume; node plugin will provide it
    volume "csi-nfs" {
      type            = "csi"
      source          = "csi-nfs"
      attachment_mode = "file-system"
      access_mode     = "multi-node-multi-writer"
    }

    network {
      mode = "bridge"
      port "http" {
        to = 80
      }
    }
    service {
      provider = "nomad"
      name     = "web"
      port     = "http"
      check {
        type     = "http"
        path     = "/"
        interval = "2s"
        timeout  = "1s"
      }
    }

    task "web" {
      driver = "docker"

      # mount the volume!
      volume_mount {
        volume      = "csi-nfs"
        destination = "${NOMAD_ALLOC_DIR}/web-nfs"
      }

      # this host user:group maps back to volume parameters.
      user = "1000:1000"

      config {
        image   = "python:slim"
        command = "/bin/bash"
        args    = ["-x", "local/entrypoint.sh"]
        ports   = ["http"]
      }
      # this entrypoint writes `date` to index.html only on the first run,
      # to demonstrate that state is persisted in NFS across restarts, etc.
      # afterwards, this can also be seen on the host machine in
      #   /srv/host-nfs/csi-nfs/index.html
      # or in the other locations the node plugin mounts on the host for this task:
      #   $ grep csi-nfs /proc/mounts
      template {
        destination = "local/entrypoint.sh"
        data        = <<EOF
#!/bin/bash
dir="${NOMAD_ALLOC_DIR}/web-nfs"
test -f $dir/index.html || echo hello from $(date) > $dir/index.html
python -m http.server ${NOMAD_PORT_http} --directory=$dir
EOF
      }
    }
  }
}
```

---

**setup.sh**

```
#!/usr/bin/env bash

# Set up all the demo components.
# This can be run repeatedly as it is fairly idempotent.

set -xeuo pipefail

plugin='rocketduck-nfs'

# run nfs server
nomad run jobs/nfs.nomad.hcl

# run controller plugin
nomad run jobs/controller-plugin.nomad.hcl
while true; do
  nomad plugin status "$plugin" | grep 'Controllers Healthy.*1' && break
  sleep 5
done

# make a volume - the controller plugin handles this request
nomad volume status -t '{{.PluginID}}' csi-nfs 2>/dev/null \
  || nomad volume create volume.hcl

# run node plugin
nomad run jobs/node-plugin.nomad.hcl
while true; do
  nomad plugin status "$plugin" | grep 'Nodes Healthy.*1' && break
  sleep 10
done

# run demo web server, which prompts the node plugin to mount the volume
nomad run jobs/web.nomad.hcl

# show volume info now that it's all set up and in use
nomad volume status csi-nfs

# show the web service ports for convenience
nomad service info -t '{{ range . }}{{ .Port }} {{ end }}' web
```
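
Note that the health-check loops grep for a count of 1, which matches the
single-client Vagrant setup used here; with more clients or controllers you
would adjust the expected counts.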

---

**teardown.sh**

```
#!/usr/bin/env bash

# Clean up all demo components.

set -x

purge() {
  nomad stop -purge "$1"
}

purge web
while true; do
  nomad volume status csi-nfs 2>&1 | grep -E 'No (allocations|volumes)' && break
  sleep 5
done
purge node

nomad volume delete csi-nfs
purge controller

purge nfs

nomad system gc
```

---

**volume.hcl**

```
id        = "csi-nfs"
name      = "csi-nfs"
type      = "csi"
plugin_id = "rocketduck-nfs"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}
capability {
  access_mode     = "multi-node-single-writer"
  attachment_mode = "file-system"
}
capability {
  access_mode     = "multi-node-reader-only"
  attachment_mode = "file-system"
}

parameters {
  # set volume directory user/group/perms (optional)
  uid  = "1000" # vagrant
  gid  = "1000"
  mode = "770"
}
```
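
To create and inspect the volume by hand instead of via `setup.sh`:

```
nomad volume create volume.hcl
nomad volume status csi-nfs
```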