Merge pull request #36 from Roblox/networking

Add support for networking
This commit is contained in:
Shishir 2020-08-31 11:44:21 -07:00 committed by GitHub
commit cbcc1b3c5d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 50 additions and 10 deletions

View File

@ -113,6 +113,29 @@ mounts = [
]
```
## Networking
`nomad-driver-containerd` supports **host** and **bridge** networks.<br/>
**NOTE:** `host` and `bridge` are mutually exclusive options, and only one of them should be used at a time.
1. **Host** network can be enabled by setting `host_network` to `true` in task config
of the job spec (see [`Supported options`](https://github.com/Roblox/nomad-driver-containerd#supported-options)).
2. **Bridge** network can be enabled by setting the `network` stanza in the task group section of the job spec.
```
network {
mode = "bridge"
}
```
You need to install CNI plugins on nomad client nodes under `/opt/cni/bin` before you can use `bridge` networks.
**Instructions for installing CNI plugins.**<br/>
- `$ curl -L -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz`<br/>
- `$ sudo mkdir -p /opt/cni/bin`<br/>
- `$ sudo tar -C /opt/cni/bin -xzf cni-plugins.tgz`
## Tests
```
$ make test
@ -140,11 +163,9 @@ Ubuntu (>= 16.04)
`nomad-driver-containerd` [`v0.1`](https://github.com/Roblox/nomad-driver-containerd/releases/tag/v0.1) is **not** production ready.
There are some open items which are currently being worked on.
1) **Networking**: Networking is **not in scope** of containerd as described [`here`](https://kubernetes.io/blog/2017/11/containerd-container-runtime-options-kubernetes/). However, an external CNI plugin can be used to add networking to the container. We are researching how to enable networking for our internal use-cases, and will publish (open-source) that work at some point.
1) **Port forwarding**: The ability to map a host port to a container port. This is currently not supported, but could be supported in future.
2) **Port forwarding**: The ability to map a host port to a container port. This is currently not supported, but could be supported in future.
3) **Consul connect**: When a user launches a job in `nomad`, s/he can add a [`service stanza`](https://www.nomadproject.io/docs/job-specification/service) which will instruct `nomad` to register the service with `consul` for service discovery. This is currently not supported.
2) **Consul connect**: When a user launches a job in `nomad`, s/he can add a [`service stanza`](https://www.nomadproject.io/docs/job-specification/service) which will instruct `nomad` to register the service with `consul` for service discovery. This is currently not supported.
## License

View File

@ -41,7 +41,7 @@ func (d *Driver) pullImage(imageName string) (containerd.Image, error) {
return d.client.Pull(d.ctxContainerd, imageName, containerd.WithPullUnpack)
}
func (d *Driver) createContainer(image containerd.Image, containerName, containerSnapshotName, containerdRuntime string, env []string, config *TaskConfig) (containerd.Container, error) {
func (d *Driver) createContainer(image containerd.Image, containerName, containerSnapshotName, containerdRuntime, netnsPath string, env []string, config *TaskConfig) (containerd.Container, error) {
if config.Command == "" && len(config.Args) > 0 {
return nil, fmt.Errorf("Command is empty. Cannot set --args without --command.")
}
@ -123,6 +123,16 @@ func (d *Driver) createContainer(image containerd.Image, containerName, containe
opts = append(opts, oci.WithMounts(mounts))
}
// nomad use CNI plugins e.g bridge to setup a network (and network namespace) for the container.
// CNI plugins need to be installed under /opt/cni/bin.
// network namespace is created at /var/run/netns/<id>.
// netnsPath is the path to the network namespace, which containerd joins to provide network
// for the container.
// NOTE: Only bridge networking mode is supported at this point.
if netnsPath != "" {
opts = append(opts, oci.WithLinuxNamespace(specs.LinuxNamespace{Type: specs.NetworkNamespace, Path: netnsPath}))
}
return d.client.NewContainer(
d.ctxContainerd,
containerName,

View File

@ -113,6 +113,7 @@ var (
SendSignals: true,
Exec: true,
FSIsolation: drivers.FSIsolationNone,
NetIsolationModes: []drivers.NetIsolationMode{drivers.NetIsolationModeGroup, drivers.NetIsolationModeTask},
}
)
@ -333,6 +334,10 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
return nil, nil, fmt.Errorf("failed to decode driver config: %v", err)
}
if driverConfig.HostNetwork && cfg.NetworkIsolation != nil {
return nil, nil, fmt.Errorf("host_network and bridge network mode are mutually exclusive, and only one of them should be set")
}
d.logger.Info("starting task", "driver_cfg", hclog.Fmt("%+v", driverConfig))
handle := drivers.NewTaskHandle(taskHandleVersion)
handle.Config = cfg
@ -355,13 +360,17 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
}
containerSnapshotName := fmt.Sprintf("%s-snapshot", containerName)
container, err := d.createContainer(image, containerName, containerSnapshotName, d.config.ContainerdRuntime, env, &driverConfig)
var netnsPath string
if cfg.NetworkIsolation != nil && cfg.NetworkIsolation.Path != "" {
netnsPath = cfg.NetworkIsolation.Path
}
container, err := d.createContainer(image, containerName, containerSnapshotName, d.config.ContainerdRuntime, netnsPath, env, &driverConfig)
if err != nil {
return nil, nil, fmt.Errorf("Error in creating container: %v", err)
}
d.logger.Info(fmt.Sprintf("Successfully created container with name: %s", containerName))
task, err := d.createTask(container, cfg.StdoutPath, cfg.StderrPath)
if err != nil {
return nil, nil, fmt.Errorf("Error in creating task: %v", err)