Merge pull request #51 from lisongmin/support-volume_mount

support volume_mount in task
This commit is contained in:
Shishir 2021-01-06 19:45:16 -08:00 committed by GitHub
commit c8520c67eb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 180 additions and 1 deletion

View File

@ -229,6 +229,22 @@ func NewPlugin(logger log.Logger) drivers.DriverPlugin {
}
}
// setVolumeMounts copies every mount requested in the Nomad task
// configuration onto the driver's task config as containerd bind
// mounts. Read-only mounts additionally carry the "ro" option.
func (tc *TaskConfig) setVolumeMounts(cfg *drivers.TaskConfig) {
	for _, mnt := range cfg.Mounts {
		opts := []string{"rbind"}
		if mnt.Readonly {
			opts = append(opts, "ro")
		}
		tc.Mounts = append(tc.Mounts, Mount{
			Type:    "bind",
			Target:  mnt.TaskPath,
			Source:  mnt.HostPath,
			Options: opts,
		})
	}
}
// PluginInfo returns information describing the plugin.
func (d *Driver) PluginInfo() (*base.PluginInfoResponse, error) {
return pluginInfo, nil
@ -345,6 +361,8 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
return nil, nil, fmt.Errorf("host_network and bridge network mode are mutually exclusive, and only one of them should be set")
}
driverConfig.setVolumeMounts(cfg)
d.logger.Info("starting task", "driver_cfg", hclog.Fmt("%+v", driverConfig))
handle := drivers.NewTaskHandle(taskHandleVersion)
handle.Config = cfg

16
example/agent_tests.hcl Normal file
View File

@ -0,0 +1,16 @@
# Nomad agent configuration used by the driver test suite.
log_level = "INFO"
# Enable the containerd task driver plugin.
plugin "containerd-driver" {
config {
enabled = true
containerd_runtime = "io.containerd.runc.v2"
stats_interval = "5s"
}
}
client {
# Host volume backing the volume_mount tests; the directory must
# exist on the host before the agent starts.
host_volume "s1" {
path = "/tmp/host_volume/s1"
read_only = false
}
}

View File

@ -0,0 +1,42 @@
# Test job: mounts the same "s1" host volume twice — once read-write
# and once read-only — into a long-running container.
job "volume_mount" {
datacenters = ["dc1"]
group "volume_mount-group" {
# Read-write view of host volume "s1".
volume "data" {
type = "host"
source = "s1"
read_only = false
}
# Read-only view of the same host volume.
volume "read_only_data" {
type = "host"
source = "s1"
read_only = true
}
task "volume_mount-task" {
driver = "containerd-driver"
config {
image = "docker.io/library/ubuntu:16.04"
# Keep the container alive long enough for the test to exec into it.
command = "sleep"
args = ["600s"]
}
volume_mount {
destination = "/tmp/t1"
volume = "data"
}
volume_mount {
destination = "/tmp/read_only_target"
volume = "read_only_data"
}
resources {
cpu = 500
memory = 256
}
}
}
}

95
tests/005-test-volume_mount.sh Executable file
View File

@ -0,0 +1,95 @@
#!/bin/bash
# End-to-end test: verify that read-write and read-only host-volume bind
# mounts work with nomad-driver-containerd (job: example/volume_mount.nomad).
job_name=volume_mount
# Host-side path of the "s1" host volume declared in the agent config.
host_volume_path=/tmp/host_volume/s1
# test volume_mount
# Run the volume_mount job and verify:
#   1. both bind mounts expose the seeded marker file,
#   2. the read-only mount rejects writes,
#   3. the read-write mount accepts writes.
# Exits 1 on any failure.
test_volume_mount_nomad_job() {
	pushd ~/go/src/github.com/Roblox/nomad-driver-containerd/example
	setup_bind_source
	echo "INFO: Starting nomad $job_name job using nomad-driver-containerd."
	nomad job run $job_name.nomad
	# Even though $(nomad job status) reports job status as "running"
	# the actual container process might not be running yet.
	# We need to wait for the actual container to start running before trying exec.
	echo "INFO: Wait for ${job_name} container to get into RUNNING state, before trying exec."
	is_${job_name}_container_active
	echo "INFO: Checking status of $job_name job."
	job_status=$(nomad job status -short $job_name|grep Status|awk '{split($0,a,"="); print a[2]}'|tr -d ' ')
	if [ "$job_status" != "running" ];then
		echo "ERROR: Error in getting ${job_name} job status."
		exit 1
	fi
	# Check if bind mount exists.
	echo "INFO: Checking if bind mount exists."
	for mountpoint in t1 read_only_target ; do
		output=$(nomad alloc exec -job ${job_name} cat /tmp/${mountpoint}/bind.txt)
		if [ "$output" != "hello" ]; then
			echo "ERROR: bind mount /tmp/${mountpoint} does not exist in container rootfs."
			exit 1
		fi
	done
	# Check read only mount can not write.
	echo "INFO: Checking read only mount is not writable."
	nomad alloc exec -job ${job_name} touch /tmp/read_only_target/writable_test.txt &>/dev/null
	if [ -e ${host_volume_path}/writable_test.txt ];then
		echo "ERROR: Read only bind mount in /tmp/read_only_target should not be writable."
		exit 1
	fi
	# Check writable mount can write.
	echo "INFO: Checking non read_only mount is writable."
	nomad alloc exec -job ${job_name} touch /tmp/t1/writable_test.txt
	if [ ! -e ${host_volume_path}/writable_test.txt ];then
		echo "ERROR: bind mount in /tmp/t1 should be writable."
		exit 1
	fi
	echo "INFO: Stopping nomad ${job_name} job."
	nomad job stop ${job_name}
	job_status=$(nomad job status -short ${job_name}|grep Status|awk '{split($0,a,"="); print a[2]}'|tr -d ' ')
	# Quote $job_status: if the pipeline yields nothing, an unquoted
	# expansion makes [ ] a syntax error instead of a failed comparison.
	if [ "$job_status" != "dead(stopped)" ];then
		echo "ERROR: Error in stopping ${job_name} job."
		exit 1
	fi
	echo "INFO: purge nomad ${job_name} job."
	nomad job stop -purge ${job_name}
	popd
}
# Reset the host volume to a known state: drop any leftovers from a
# previous run and seed the marker file the mount checks look for.
setup_bind_source() {
	rm -f "${host_volume_path}/bind.txt" "${host_volume_path}/writable_test.txt"
	echo hello > "${host_volume_path}/bind.txt"
}
# Poll containerd (up to 5 attempts, 4s apart) until the volume_mount
# container reports RUNNING in the nomad namespace; exit 1 if it never
# comes up. On success, sleeps an extra 5s to let the task settle.
is_volume_mount_container_active() {
	i="0"
	while test "$i" -lt 5
	do
		# Test the pipeline directly instead of checking $? afterwards.
		if sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING; then
			echo "INFO: ${job_name} container is up and running"
			sleep 5s
			break
		fi
		echo "INFO: ${job_name} container is down, sleep for 4 seconds."
		sleep 4s
		# POSIX arithmetic expansion; the original $[...] form is deprecated.
		i=$((i+1))
	done
	if [ "$i" -ge 5 ]; then
		echo "ERROR: ${job_name} container didn't come up. exit 1."
		exit 1
	fi
}
# Entry point: run the end-to-end volume_mount test.
test_volume_mount_nomad_job

View File

@ -152,7 +152,7 @@ Documentation=https://nomadproject.io
After=network.target
[Service]
ExecStart=/usr/local/bin/nomad agent -dev -config=/home/circleci/go/src/github.com/Roblox/nomad-driver-containerd/example/agent.hcl -plugin-dir=/tmp/nomad-driver-containerd
ExecStart=/usr/local/bin/nomad agent -dev -config=/home/circleci/go/src/github.com/Roblox/nomad-driver-containerd/example/agent_tests.hcl -plugin-dir=/tmp/nomad-driver-containerd
KillMode=process
Delegate=yes
LimitNOFILE=1048576
@ -165,12 +165,20 @@ WantedBy=multi-user.target
EOF
sudo mv nomad.service /lib/systemd/system/nomad.service
sudo systemctl daemon-reload
prepare_nomad_host_volume
echo "INFO: Starting nomad server and nomad-driver-containerd."
sudo systemctl start nomad
is_systemd_service_active "nomad.service"
popd
}
# Create the directory backing the "s1" host_volume declared in
# example/agent_tests.hcl, so the nomad client can register it.
prepare_nomad_host_volume() {
echo "INFO: Prepare nomad host volume."
mkdir -p /tmp/host_volume/s1
}
is_containerd_driver_active() {
i="0"
while test $i -lt 5