Merge pull request #2503 from thaJeztah/pidslimit
Add support for --limit-pids on service create / update and stacks (swarm)
commit 87db43814b
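In short: docker service create and docker service update gain a --limit-pids flag (API v1.41+, swarm-only, default 0 = unlimited), service inspect --pretty prints the limit, and compose schema v3.9 accepts a pids key under deploy.resources.limits. Example invocations (service names here are illustrative): docker service create --limit-pids 100 nginx, or docker service update --limit-pids 100 my_service.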
@@ -152,6 +152,9 @@ Resources:
 {{- if .ResourceLimitMemory }}
  Memory: {{ .ResourceLimitMemory }}
 {{- end }}{{ end }}{{ end }}
+{{- if gt .ResourceLimitPids 0 }}
+ PIDs: {{ .ResourceLimitPids }}
+{{- end }}
 {{- if .Networks }}
 Networks:
 {{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}

@@ -484,7 +487,7 @@ func (ctx *serviceInspectContext) HasResourceLimits() bool {
 	if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
 		return false
 	}
-	return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0
+	return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids > 0
 }

 func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {
@@ -498,6 +501,10 @@ func (ctx *serviceInspectContext) ResourceLimitMemory() string {
 	return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
 }

+func (ctx *serviceInspectContext) ResourceLimitPids() int64 {
+	return ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids
+}
+
 func (ctx *serviceInspectContext) Networks() []string {
 	var out []string
 	for _, n := range ctx.Service.Spec.TaskTemplate.Networks {

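The pretty-format template above only prints a PIDs line when the limit is non-zero. A minimal, runnable sketch of that guard using Go's text/template (the context struct here is a stand-in, not the CLI's real serviceInspectContext):

package main

import (
	"os"
	"text/template"
)

type inspectCtx struct{ ResourceLimitPids int64 }

func main() {
	// "gt ... 0" skips the line entirely when the limit is 0 (unlimited),
	// matching the formatter change above.
	tmpl := template.Must(template.New("limits").Parse(
		"{{- if gt .ResourceLimitPids 0 }}PIDs: {{ .ResourceLimitPids }}\n{{- end }}"))
	_ = tmpl.Execute(os.Stdout, inspectCtx{ResourceLimitPids: 20}) // prints "PIDs: 20"
	_ = tmpl.Execute(os.Stdout, inspectCtx{})                      // prints nothing
}
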
@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
+	"gotest.tools/v3/golden"
 )

 func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string {
@@ -78,6 +79,13 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
 					Timeout:  1,
 				},
 			},
+			Resources: &swarm.ResourceRequirements{
+				Limits: &swarm.Limit{
+					NanoCPUs:    100000000000,
+					MemoryBytes: 10490000,
+					Pids:        20,
+				},
+			},
 			Networks: []swarm.NetworkAttachmentConfig{
 				{
 					Target: "5vpyomhb6ievnk0i0o60gcnei",
@@ -136,6 +144,11 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
 	return b.String()
 }

+func TestPrettyPrint(t *testing.T) {
+	s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
+	golden.Assert(t, s, "service-inspect-pretty.golden")
+}
+
 func TestPrettyPrintWithNoUpdateConfig(t *testing.T) {
 	s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
 	if strings.Contains(s, "UpdateStatus") {

@@ -225,6 +225,7 @@ func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConf
 type resourceOptions struct {
 	limitCPU            opts.NanoCPUs
 	limitMemBytes       opts.MemBytes
+	limitPids           int64
 	resCPU              opts.NanoCPUs
 	resMemBytes         opts.MemBytes
 	resGenericResources []string
@@ -240,6 +241,7 @@ func (r *resourceOptions) ToResourceRequirements() (*swarm.ResourceRequirements,
 		Limits: &swarm.Limit{
 			NanoCPUs:    r.limitCPU.Value(),
 			MemoryBytes: r.limitMemBytes.Value(),
+			Pids:        r.limitPids,
 		},
 		Reservations: &swarm.Resources{
 			NanoCPUs: r.resCPU.Value(),
@@ -821,6 +823,9 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
 	flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
 	flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
 	flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
+	flags.Int64Var(&opts.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
+	flags.SetAnnotation(flagLimitPids, "version", []string{"1.41"})
+	flags.SetAnnotation(flagLimitPids, "swarm", nil)

 	flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
 	flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
@@ -934,6 +939,7 @@ const (
 	flagLabelAdd    = "label-add"
 	flagLimitCPU    = "limit-cpu"
 	flagLimitMemory = "limit-memory"
+	flagLimitPids   = "limit-pids"
 	flagMaxReplicas = "replicas-max-per-node"
 	flagConcurrent  = "max-concurrent"
 	flagMode        = "mode"

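The new flag is registered with "version" and "swarm" annotations so the CLI can hide it from daemons older than API 1.41. A rough, self-contained sketch of that pflag annotation mechanism (only the flag name, default, and annotation keys come from the diff; everything else is an assumption for illustration):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("service create", pflag.ContinueOnError)
	flags.Int64("limit-pids", 0, "Limit maximum number of processes (default 0 = unlimited)")
	// Annotations are free-form metadata attached to a flag; the CLI's help
	// machinery reads the "version" key to gate flags by negotiated API version.
	flags.SetAnnotation("limit-pids", "version", []string{"1.41"})
	flags.SetAnnotation("limit-pids", "swarm", nil)

	fmt.Println(flags.Lookup("limit-pids").Annotations["version"]) // [1.41]
}
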
@@ -217,6 +217,16 @@ func TestToServiceNetwork(t *testing.T) {
 	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id111"}, {Target: "id555"}, {Target: "id999"}}, service.TaskTemplate.Networks))
 }

+func TestToServicePidsLimit(t *testing.T) {
+	flags := newCreateCommand(nil).Flags()
+	opt := newServiceOptions()
+	opt.mode = "replicated"
+	opt.resources.limitPids = 100
+	service, err := opt.ToService(context.Background(), &fakeClient{}, flags)
+	assert.NilError(t, err)
+	assert.Equal(t, service.TaskTemplate.Resources.Limits.Pids, int64(100))
+}
+
 func TestToServiceUpdateRollback(t *testing.T) {
 	expected := swarm.ServiceSpec{
 		UpdateConfig: &swarm.UpdateConfig{

cli/command/service/testdata/service-inspect-pretty.golden (new file, vendored, 42 lines)
@@ -0,0 +1,42 @@
+ID: de179gar9d0o7ltdybungplod
+Name: my_service
+Labels:
+ com.label=foo
+Service Mode: Replicated
+ Replicas: 2
+Placement:
+ContainerSpec:
+ Image: foo/bar@sha256:this_is_a_test
+Configs:
+ Target: /configtest.conf
+  Source: configtest.conf
+Secrets:
+ Target: /secrettest.conf
+  Source: secrettest.conf
+Log Driver:
+ Name: driver
+ LogOpts:
+  max-file: 5
+
+Resources:
+ Limits:
+  CPU: 100
+  Memory: 10MiB
+  PIDs: 20
+Networks: mynetwork
+Endpoint Mode: vip
+Ports:
+ PublishedPort = 30000
+  Protocol = tcp
+  TargetPort = 5000
+  PublishMode =
+ Healthcheck:
+  Interval = 4ns
+  Retries = 3
+  StartPeriod = 2ns
+  Timeout = 1ns
+  Tests:
+   Test = CMD-SHELL
+   Test = curl

@@ -283,6 +283,12 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		}
 	}

+	updateInt64 := func(flag string, field *int64) {
+		if flags.Changed(flag) {
+			*field, _ = flags.GetInt64(flag)
+		}
+	}
+
 	updateUint64 := func(flag string, field *uint64) {
 		if flags.Changed(flag) {
 			*field, _ = flags.GetUint64(flag)
@@ -339,10 +345,11 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags

 	updateSysCtls(flags, &task.ContainerSpec.Sysctls)

-	if anyChanged(flags, flagLimitCPU, flagLimitMemory) {
+	if anyChanged(flags, flagLimitCPU, flagLimitMemory, flagLimitPids) {
 		taskResources().Limits = spec.TaskTemplate.Resources.Limits
 		updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs)
 		updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes)
+		updateInt64(flagLimitPids, &task.Resources.Limits.Pids)
 	}

 	if anyChanged(flags, flagReserveCPU, flagReserveMemory) {

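The updateInt64 helper mirrors the existing updateUint64: a field is only overwritten when its flag was explicitly set, so an update never resets values the user did not touch. A small self-contained sketch of that guard (names and values are illustrative only, not the CLI's real update path):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("service update", pflag.ContinueOnError)
	flags.Int64("limit-pids", 0, "Limit maximum number of processes")
	flags.Parse([]string{"--limit-pids", "2"})

	pids := int64(100) // pretend this came from the existing service spec
	// Only overwrite when the user actually passed the flag. GetInt64's error
	// is safe to ignore here because the flag is known to be registered as Int64.
	if flags.Changed("limit-pids") {
		pids, _ = flags.GetInt64("limit-pids")
	}
	fmt.Println(pids) // 2
}
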
@@ -620,45 +620,53 @@ func TestUpdateIsolationValid(t *testing.T) {

 // TestUpdateLimitsReservations tests that limits and reservations are updated,
 // and that values that are not updated are not reset to their default value
 func TestUpdateLimitsReservations(t *testing.T) {
-	spec := swarm.ServiceSpec{
-		TaskTemplate: swarm.TaskSpec{
-			ContainerSpec: &swarm.ContainerSpec{},
-		},
-	}
-
-	// test that updating works if the service did not previously
-	// have limits set (https://github.com/moby/moby/issues/38363)
-	flags := newUpdateCommand(nil).Flags()
-	err := flags.Set(flagLimitCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagLimitMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-
-	spec = swarm.ServiceSpec{
-		TaskTemplate: swarm.TaskSpec{
-			ContainerSpec: &swarm.ContainerSpec{},
-		},
-	}
+	// test that updating works if the service did not previously
+	// have limits set (https://github.com/moby/moby/issues/38363)
+	t.Run("update limits from scratch", func(t *testing.T) {
+		spec := swarm.ServiceSpec{
+			TaskTemplate: swarm.TaskSpec{
+				ContainerSpec: &swarm.ContainerSpec{},
+			},
+		}
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagLimitMemory, "200M")
+		assert.NilError(t, err)
+		err = flags.Set(flagLimitPids, "100")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+	})

 	// test that updating works if the service did not previously
 	// have reservations set (https://github.com/moby/moby/issues/38363)
-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagReserveCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
+	t.Run("update reservations from scratch", func(t *testing.T) {
+		spec := swarm.ServiceSpec{
+			TaskTemplate: swarm.TaskSpec{
+				ContainerSpec: &swarm.ContainerSpec{},
+			},
+		}
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagReserveCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveMemory, "200M")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+	})

-	spec = swarm.ServiceSpec{
+	spec := swarm.ServiceSpec{
 		TaskTemplate: swarm.TaskSpec{
 			ContainerSpec: &swarm.ContainerSpec{},
 			Resources: &swarm.ResourceRequirements{
 				Limits: &swarm.Limit{
 					NanoCPUs:    1000000000,
 					MemoryBytes: 104857600,
+					Pids:        100,
 				},
 				Reservations: &swarm.Resources{
 					NanoCPUs: 1000000000,
@@ -668,29 +676,79 @@ func TestUpdateLimitsReservations(t *testing.T) {
 			},
 		}

-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagLimitCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveCPU, "2")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	// Updating without flags set should not modify existing values
+	t.Run("update without flags set", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(1000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(1000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	})

-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagLimitMemory, "200M")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	// Updating CPU limit/reservation should not affect memory limit/reservation
+	// and pids-limit
+	t.Run("update cpu limit and reservation", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveCPU, "2")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	})

+	// Updating Memory limit/reservation should not affect CPU limit/reservation
+	// and pids-limit
+	t.Run("update memory limit and reservation", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitMemory, "200M")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveMemory, "200M")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})

+	// Updating PidsLimit should only modify PidsLimit, other values unchanged
+	t.Run("update pids limit", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitPids, "2")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(2)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})

+	t.Run("update pids limit to default", func(t *testing.T) {
+		// Updating PidsLimit to 0 should work
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitPids, "0")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(0)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})
 }

 func TestUpdateIsolationInvalid(t *testing.T) {

@@ -489,11 +489,22 @@ func fromComposeConstraints(s []string) *latest.Constraints {

 func fromComposeResources(r composeTypes.Resources) latest.Resources {
 	return latest.Resources{
-		Limits:       fromComposeResourcesResource(r.Limits),
+		Limits:       fromComposeResourcesResourceLimit(r.Limits),
 		Reservations: fromComposeResourcesResource(r.Reservations),
 	}
 }

+// TODO create ResourceLimit type and support for limiting Pids on k8s
+func fromComposeResourcesResourceLimit(r *composeTypes.ResourceLimit) *latest.Resource {
+	if r == nil {
+		return nil
+	}
+	return &latest.Resource{
+		MemoryBytes: int64(r.MemoryBytes),
+		NanoCPUs:    r.NanoCPUs,
+	}
+}
+
 func fromComposeResourcesResource(r *composeTypes.Resource) *latest.Resource {
 	if r == nil {
 		return nil

@@ -534,6 +534,7 @@ func convertResources(source composetypes.Resources) (*swarm.ResourceRequirement
 		resources.Limits = &swarm.Limit{
 			NanoCPUs:    cpus,
 			MemoryBytes: int64(source.Limits.MemoryBytes),
+			Pids:        source.Limits.Pids,
 		}
 	}
 	if source.Reservations != nil {

@@ -74,7 +74,7 @@ func TestConvertExtraHosts(t *testing.T) {

 func TestConvertResourcesFull(t *testing.T) {
 	source := composetypes.Resources{
-		Limits: &composetypes.Resource{
+		Limits: &composetypes.ResourceLimit{
 			NanoCPUs:    "0.003",
 			MemoryBytes: composetypes.UnitBytes(300000000),
 		},
|
||||
|
||||
func TestConvertResourcesOnlyMemory(t *testing.T) {
|
||||
source := composetypes.Resources{
|
||||
Limits: &composetypes.Resource{
|
||||
Limits: &composetypes.ResourceLimit{
|
||||
MemoryBytes: composetypes.UnitBytes(300000000),
|
||||
},
|
||||
Reservations: &composetypes.Resource{
|
||||
|
@@ -68,6 +68,7 @@ services:
       limits:
         cpus: '0.001'
        memory: 50M
+        pids: 100
       reservations:
         cpus: '0.0001'
         memory: 20M
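The same limit is available to stacks: a v3.9 stack file with pids: 100 under deploy.resources.limits, deployed with docker stack deploy, ends up in swarm.Limit.Pids via convertResources above (stack and file names are whatever you normally use).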
@@ -84,9 +84,10 @@ func services(workingDir, homeDir string) []types.ServiceConfig {
 				Order: "start-first",
 			},
 			Resources: types.Resources{
-				Limits: &types.Resource{
+				Limits: &types.ResourceLimit{
 					NanoCPUs:    "0.001",
 					MemoryBytes: 50 * 1024 * 1024,
+					Pids:        100,
 				},
 				Reservations: &types.Resource{
 					NanoCPUs: "0.0001",
@@ -581,6 +582,7 @@ services:
       limits:
         cpus: "0.001"
         memory: "52428800"
+        pids: 100
       reservations:
         cpus: "0.0001"
         memory: "20971520"
@@ -1070,7 +1072,8 @@ func fullExampleJSON(workingDir string) string {
       "resources": {
         "limits": {
           "cpus": "0.001",
-          "memory": "52428800"
+          "memory": "52428800",
+          "pids": 100
         },
         "reservations": {
           "cpus": "0.0001",

@@ -20,7 +20,7 @@ func TestMarshallConfig(t *testing.T) {
 	assert.Check(t, is.Equal(expected, string(actual)))

 	// Make sure the expected output can still be parsed back
-	dict, err := ParseYAML([]byte("version: '3.7'\n" + expected))
+	dict, err := ParseYAML([]byte("version: '3.9'\n" + expected))
 	assert.NilError(t, err)
 	_, err = Load(buildConfigDetails(dict, map[string]string{}))
 	assert.NilError(t, err)

@@ -583,45 +583,45 @@ ean7MQBPP+U4w19V/z+t/hsAAP//Fd/bF0ZHAAA=
 	"/data/config_schema_v3.9.json": {
 		name:    "config_schema_v3.9.json",
 		local:   "data/config_schema_v3.9.json",
-		size:    18362,
+		size:    18407,
 		modtime: 1518458244,
 		compressed: `
-[previous base64-encoded gzip payload of config_schema_v3.9.json, omitted]
+[regenerated base64-encoded gzip payload of config_schema_v3.9.json, omitted]
 `,
 	},

@@ -392,7 +392,8 @@
         "type": "object",
         "properties": {
           "cpus": {"type": "string"},
-          "memory": {"type": "string"}
+          "memory": {"type": "string"},
+          "pids": {"type": "integer"}
         },
         "additionalProperties": false
       },

@@ -302,11 +302,19 @@ type UpdateConfig struct {

 // Resources the resource limits and reservations
 type Resources struct {
-	Limits       *Resource `yaml:",omitempty" json:"limits,omitempty"`
-	Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"`
+	Limits       *ResourceLimit `yaml:",omitempty" json:"limits,omitempty"`
+	Reservations *Resource      `yaml:",omitempty" json:"reservations,omitempty"`
 }

-// Resource is a resource to be limited or reserved
+// ResourceLimit is a resource to be limited
+type ResourceLimit struct {
+	// TODO: types to convert from units and ratios
+	NanoCPUs    string    `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
+	MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
+	Pids        int64     `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"`
+}
+
+// Resource is a resource to be reserved
 type Resource struct {
 	// TODO: types to convert from units and ratios
 	NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`

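Splitting ResourceLimit from Resource keeps pids out of reservations, and the omitempty tags keep a zero (unlimited) limit out of marshalled output. A trimmed illustration with encoding/json (types copied loosely from above, not the package's real definitions):

package main

import (
	"encoding/json"
	"fmt"
)

type ResourceLimit struct {
	NanoCPUs string `json:"cpus,omitempty"`
	Pids     int64  `json:"pids,omitempty"`
}

func main() {
	// Pids == 0 means "unlimited" and is dropped by omitempty.
	a, _ := json.Marshal(ResourceLimit{NanoCPUs: "0.001"})
	b, _ := json.Marshal(ResourceLimit{NanoCPUs: "0.001", Pids: 100})
	fmt.Println(string(a)) // {"cpus":"0.001"}
	fmt.Println(string(b)) // {"cpus":"0.001","pids":100}
}
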
@@ -3677,6 +3677,7 @@ _docker_service_update_and_create() {
 			--isolation
 			--limit-cpu
 			--limit-memory
+			--limit-pids
 			--log-driver
 			--log-opt
 			--replicas
@@ -1970,6 +1970,7 @@ __docker_service_subcommand() {
 		"($help)*--label=[Service labels]:label: "
 		"($help)--limit-cpu=[Limit CPUs]:value: "
 		"($help)--limit-memory=[Limit Memory]:value: "
+		"($help)--limit-pids[Limit maximum number of processes (default 0 = unlimited)]"
 		"($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers"
 		"($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options"
 		"($help)*--mount=[Attach a filesystem mount to the service]:mount: "
@@ -39,6 +39,7 @@ Options:
   -l, --label list          Service labels
       --limit-cpu decimal   Limit CPUs
       --limit-memory bytes  Limit Memory
+      --limit-pids int      Limit maximum number of processes (default 0 = unlimited)
       --log-driver string   Logging driver for service
       --log-opt list        Logging driver options
       --max-concurrent      Number of job tasks to run at once (default equal to --replicas)
@@ -52,6 +52,7 @@ Options:
       --label-rm list       Remove a label by its key
       --limit-cpu decimal   Limit CPUs
       --limit-memory bytes  Limit Memory
+      --limit-pids int      Limit maximum number of processes (default 0 = unlimited)
       --log-driver string   Logging driver for service
       --log-opt list        Logging driver options
       --max-concurrent      Number of job tasks to run at once (default equal to --replicas)