diff --git a/containerd/containerd.go b/containerd/containerd.go
index 8d00c6b..50c1cdd 100644
--- a/containerd/containerd.go
+++ b/containerd/containerd.go
@@ -45,6 +45,7 @@ type ContainerConfig struct {
 	AllocDirDest    string
 	Env             []string
 	MemoryLimit     int64
+	MemoryHardLimit int64
 	CPUShares       int64
 }
 
@@ -196,7 +197,7 @@ func (d *Driver) createContainer(containerConfig *ContainerConfig, config *TaskC
 	opts = append(opts, oci.WithEnv(containerConfig.Env))
 
 	// Set cgroups memory limit.
-	opts = append(opts, oci.WithMemoryLimit(uint64(containerConfig.MemoryLimit)))
+	opts = append(opts, WithMemoryLimits(containerConfig.MemoryLimit, containerConfig.MemoryHardLimit))
 
 	// Set CPU Shares.
 	opts = append(opts, oci.WithCPUShares(uint64(containerConfig.CPUShares)))
diff --git a/containerd/driver.go b/containerd/driver.go
index 30a48a6..6ecd340 100644
--- a/containerd/driver.go
+++ b/containerd/driver.go
@@ -454,7 +454,8 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
 
 	// memory and cpu are coming from the resources stanza of the nomad job.
 	// https://www.nomadproject.io/docs/job-specification/resources
-	containerConfig.MemoryLimit = cfg.Resources.LinuxResources.MemoryLimitBytes
+	containerConfig.MemoryLimit = cfg.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024
+	containerConfig.MemoryHardLimit = cfg.Resources.NomadResources.Memory.MemoryMaxMB * 1024 * 1024
 	containerConfig.CPUShares = cfg.Resources.LinuxResources.CPUShares
 
 	container, err := d.createContainer(&containerConfig, &driverConfig)
diff --git a/containerd/utils.go b/containerd/utils.go
index ef9c4e9..fdbab31 100644
--- a/containerd/utils.go
+++ b/containerd/utils.go
@@ -60,3 +60,28 @@ func WithSysctls(sysctls map[string]string) oci.SpecOpts {
 		return nil
 	}
 }
+
+// WithMemoryLimits accepts soft (`memory`) and hard (`memory_max`) limits as parameters and sets the desired
+// limits. With `Nomad<1.1.0` releases, soft (`memory`) will act as a hard limit, and if the container process exceeds
+// that limit, it will be OOM'ed. With `Nomad>=1.1.0` releases, users can over-provision using `soft` and `hard`
+// limits. The container process will only get OOM'ed if the hard limit is exceeded.
+func WithMemoryLimits(soft, hard int64) oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
+		if s.Linux != nil {
+			if s.Linux.Resources == nil {
+				s.Linux.Resources = &specs.LinuxResources{}
+			}
+			if s.Linux.Resources.Memory == nil {
+				s.Linux.Resources.Memory = &specs.LinuxMemory{}
+			}
+
+			if hard > 0 {
+				s.Linux.Resources.Memory.Limit = &hard
+				s.Linux.Resources.Memory.Reservation = &soft
+			} else {
+				s.Linux.Resources.Memory.Limit = &soft
+			}
+		}
+		return nil
+	}
+}