Samba/CIFS volumes and Nomad
I have a NAS running Unraid where I host a number of shares exported over Samba, and I needed to make them accessible to containers on my Nomad cluster.
Nomad supports managing external storage volumes that adhere to the CSI standard. In theory, this means you can take any Kubernetes-compatible CSI implementation and integrate it (https://developer.hashicorp.com/nomad/docs/concepts/plugins/csi, https://kubernetes-csi.github.io/docs/drivers.html).
After some research I found a couple of references, but it still took some trial and error to get things working.
The Controller and Node plugins
The following job deploys two instances of the controller plugin in the cluster:
job "csi-smb-controller" {
region = "global"
datacenters = ["home"]
spread {
attribute = "${node.datacenter}"
weight = 100
}
group "controller" {
count = 2
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=smb.csi.k8s.io"
]
privileged = true
}
csi_plugin {
id = "smb"
type = "controller"
mount_dir = "/csi"
}
resources {
memory = 512
cpu = 512
}
}
}
}
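Assuming you saved the spec as csi-smb-controller.hcl (the filename is my own choice), deploying it is just a regular job run:

[root@nomad-mobile ~]# nomad job run csi-smb-controller.hcl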
On the nodes where you want Samba shares to be mounted, the job is almost identical, except that it is of type "system" (so it runs on every node) and the csi_plugin stanza is configured with type "node".
job "plugin-smb-nodes" {
region = "global"
datacenters = ["home"]
type = "system"
group "nodes" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=smb.csi.k8s.io"
]
# node plugins must run as privileged jobs because they
# mount disks to the host
privileged = true
}
csi_plugin {
id = "smb"
type = "node"
mount_dir = "/csi"
}
resources {
memory = 512
cpu = 512
}
}
}
}
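After running the node job the same way, a quick sanity check is nomad plugin status, which should report the controller and node instances as healthy:

[root@nomad-mobile ~]# nomad job run plugin-smb-nodes.hcl
[root@nomad-mobile ~]# nomad plugin status smb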
Next, you define the volumes that you want to make accessible to containers.
Let's take "movies" as an example.
Note:
- There's probably a better way to handle the credentials, but I haven't looked too deeply into it
Your movies volume would look something like this:
plugin_id = "smb"
type = "csi"
id = "movies"
name = "movies"
capability {
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
secrets {
username = "<username>"
password = "<password>"
}
parameters {
source = "//<server>"
subDir = "Movies"
}
mount_options {
mount_flags = [
"dir_mode=0777",
"file_mode=0777",
"vers=3.0",
"nounix",
"noserverino"
]
}
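Assuming the spec is saved as movies.hcl (again, my own filename), the volume is created through the controller plugin, which takes care of the subDir on the share:

[root@nomad-mobile ~]# nomad volume create movies.hcl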
In my case, the volume is already being consumed by my Plex container, hence the entry under "Allocations":
[root@nomad-mobile ~]# nomad volume status movies
ID                   = movies
Name                 = movies
Namespace            = default
External ID          = <server>/Movies#movies#
Plugin ID            = smb
Provider             = smb.csi.k8s.io
Version              = v1.7.0
Schedulable          = true
Controllers Healthy  = 2
Controllers Expected = 2
Nodes Healthy        = 7
Nodes Expected       = 7
Access Mode          = multi-node-multi-writer
Attachment Mode      = file-system
Mount Options        = flags: [REDACTED]
Namespace            = default

Allocations
ID        Node ID   Task Group  Version  Desired  Status   Created    Modified
2e6f5e6a  3137b334  plex        6        run      running  5d23h ago  5d23h ago
Mounting the Volume inside your container
job "plex" {
region = "global"
datacenters = ["home"]
type = "service"
...
group "plex" {
count = 1
network {
port "webui" { static = 32400 }
port "companion" { static = 3005 }
port "roku_companion" { static = 8324 }
port "dlna" { static = 32469 }
port "dlna_alt" { static = 1900 }
port "gdm" { static = 32410 }
port "gdm_alt1" { static = 32412 }
port "gdm_alt2" { static = 32413 }
port "gdm_alt3" { static = 32414 }
}
volume "movies" {
type = "csi"
read_only = false
source = "movies"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
...
task "plex" {
driver = "docker"
config {
image = "linuxserver/plex"
...
}
volume_mount {
volume = "movies"
destination = "/movies"
}
...
env {
VERSION="docker"
PUID=99
PGID=100
...
TZ="Europe/Brussels"
}
resources {
cpu = 10000
memory = 8192
}
}
}
}
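Once the job is running, you can verify that the share is actually mounted inside the container with nomad alloc exec, using the allocation ID from the volume status output above:

[root@nomad-mobile ~]# nomad alloc exec -task plex 2e6f5e6a ls /movies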