# This job deploys Ceph as a Docker container in "demo mode"; it runs all its
# processes in a single task and doesn't persist data after a restart

variable "cluster_id" {
  type = string
  # generated from uuid5(dns) with ceph.example.com as the seed
  default     = "e9ba69fa-67ff-5920-b374-84d5801edd19"
  description = "cluster ID for the Ceph monitor"
}

variable "hostname" {
  type        = string
  default     = "linux" # hostname of the Nomad repo's Vagrant box
  description = "hostname of the demo host"
}

job "ceph" {
  datacenters = ["dc1"]

  group "ceph" {

    network {
      # we can't configure networking in a way that will both satisfy the Ceph
      # monitor's requirement to know its own IP address *and* be routable
      # between containers, without either CNI or fixing
      # https://github.com/hashicorp/nomad/issues/9781
      #
      # So for now we'll use host networking to keep this demo understandable.
      # That also means the controller plugin will need to use host addresses.
      mode = "host"
    }

    service {
      name = "ceph-mon"
      port = 3300
    }

    service {
      name = "ceph-dashboard"
      port = 5000

      check {
        type           = "http"
        interval       = "5s"
        timeout        = "1s"
        path           = "/"
        initial_status = "warning"
      }
    }

    task "ceph" {
      driver = "docker"

      config {
        image        = "ceph/daemon:latest-octopus"
        args         = ["demo"]
        network_mode = "host"
        privileged   = true

        mount {
          type   = "bind"
          source = "local/ceph"
          target = "/etc/ceph"
        }
      }

      resources {
        memory = 512
        cpu    = 256
      }

      template {
        data = <