github.com/rish1988/moby@v25.0.2+incompatible/daemon/cluster/executor/container/container.go

     1  package container // import "github.com/docker/docker/daemon/cluster/executor/container"
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"net"
     8  	"strconv"
     9  	"strings"
    10  
    11  	"github.com/containerd/log"
    12  	"github.com/distribution/reference"
    13  	"github.com/docker/docker/api/types"
    14  	enginecontainer "github.com/docker/docker/api/types/container"
    15  	"github.com/docker/docker/api/types/events"
    16  	"github.com/docker/docker/api/types/filters"
    17  	enginemount "github.com/docker/docker/api/types/mount"
    18  	"github.com/docker/docker/api/types/network"
    19  	"github.com/docker/docker/api/types/volume"
    20  	"github.com/docker/docker/daemon/cluster/convert"
    21  	executorpkg "github.com/docker/docker/daemon/cluster/executor"
    22  	clustertypes "github.com/docker/docker/daemon/cluster/provider"
    23  	"github.com/docker/docker/libnetwork/scope"
    24  	"github.com/docker/go-connections/nat"
    25  	"github.com/docker/go-units"
    26  	gogotypes "github.com/gogo/protobuf/types"
    27  	"github.com/moby/swarmkit/v2/agent/exec"
    28  	"github.com/moby/swarmkit/v2/api"
    29  	"github.com/moby/swarmkit/v2/api/genericresource"
    30  	"github.com/moby/swarmkit/v2/template"
    31  )
    32  
    33  const (
    34  	// systemLabelPrefix represents the reserved namespace for system labels.
    35  	systemLabelPrefix = "com.docker.swarm"
    36  )
    37  
    38  // containerConfig converts task properties into docker container compatible
    39  // components.
    40  type containerConfig struct {
    41  	task                *api.Task
    42  	networksAttachments map[string]*api.NetworkAttachment
    43  }
    44  
    45  // newContainerConfig returns a validated container config. If this function
    46  // returns without error, no method on the returned config should return an error.
    47  func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
    48  	var c containerConfig
    49  	return &c, c.setTask(t, node)
    50  }
    51  
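        // setTask validates the task, indexes its network attachments by name,
        // and expands the container spec template against the node description
        // before storing the task on the config.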
    52  func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
    53  	if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
    54  		return exec.ErrRuntimeUnsupported
    55  	}
    56  
    57  	container := t.Spec.GetContainer()
    58  	if container != nil {
    59  		if container.Image == "" {
    60  			return ErrImageRequired
    61  		}
    62  
    63  		if err := validateMounts(container.Mounts); err != nil {
    64  			return err
    65  		}
    66  	}
    67  
    68  	// index the networks by name
    69  	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
    70  	for _, attachment := range t.Networks {
    71  		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
    72  	}
    73  
    74  	c.task = t
    75  
    76  	if t.Spec.GetContainer() != nil {
    77  		preparedSpec, err := template.ExpandContainerSpec(node, t)
    78  		if err != nil {
    79  			return err
    80  		}
    81  		c.task.Spec.Runtime = &api.TaskSpec_Container{
    82  			Container: preparedSpec,
    83  		}
    84  	}
    85  
    86  	return nil
    87  }
    88  
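        // networkAttachmentContainerID returns the target container ID for a
        // network-attachment task, or an empty string if the task has no
        // attachment spec.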
    89  func (c *containerConfig) networkAttachmentContainerID() string {
    90  	attachment := c.task.Spec.GetAttachment()
    91  	if attachment == nil {
    92  		return ""
    93  	}
    94  
    95  	return attachment.ContainerID
    96  }
    97  
    98  func (c *containerConfig) taskID() string {
    99  	return c.task.ID
   100  }
   101  
   102  func (c *containerConfig) spec() *api.ContainerSpec {
   103  	return c.task.Spec.GetContainer()
   104  }
   105  
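        // nameOrID returns the container name for container tasks, or the
        // attachment's container ID for network-attachment tasks.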
   106  func (c *containerConfig) nameOrID() string {
   107  	if c.task.Spec.GetContainer() != nil {
   108  		return c.name()
   109  	}
   110  
   111  	return c.networkAttachmentContainerID()
   112  }
   113  
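        // name returns the container name, preferring the name set by the
        // orchestrator in the task annotations and otherwise falling back to the
        // generated service.slot.id form (the node ID is used when the task has
        // no slot).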
   114  func (c *containerConfig) name() string {
   115  	if c.task.Annotations.Name != "" {
   116  		// if set, use the container Annotations.Name field, set in the orchestrator.
   117  		return c.task.Annotations.Name
   118  	}
   119  
   120  	slot := fmt.Sprint(c.task.Slot)
   121  	if slot == "" || c.task.Slot == 0 {
   122  		slot = c.task.NodeID
   123  	}
   124  
   125  	// fallback to service.slot.id.
   126  	return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID)
   127  }
   128  
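        // image returns the container image in familiar form, normalizing the
        // reference and adding the default tag when none is present; the raw
        // spec value is returned if it cannot be parsed.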
   129  func (c *containerConfig) image() string {
   130  	raw := c.spec().Image
   131  	ref, err := reference.ParseNormalizedNamed(raw)
   132  	if err != nil {
   133  		return raw
   134  	}
   135  	return reference.FamiliarString(reference.TagNameOnly(ref))
   136  }
   137  
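        // portBindings returns the host port bindings for the task's host-mode
        // published ports.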
   138  func (c *containerConfig) portBindings() nat.PortMap {
   139  	portBindings := nat.PortMap{}
   140  	if c.task.Endpoint == nil {
   141  		return portBindings
   142  	}
   143  
   144  	for _, portConfig := range c.task.Endpoint.Ports {
   145  		if portConfig.PublishMode != api.PublishModeHost {
   146  			continue
   147  		}
   148  
   149  		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
   150  		binding := []nat.PortBinding{
   151  			{},
   152  		}
   153  
   154  		if portConfig.PublishedPort != 0 {
   155  			binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort))
   156  		}
   157  		portBindings[port] = binding
   158  	}
   159  
   160  	return portBindings
   161  }
   162  
   163  func (c *containerConfig) isolation() enginecontainer.Isolation {
   164  	return convert.IsolationFromGRPC(c.spec().Isolation)
   165  }
   166  
   167  func (c *containerConfig) init() *bool {
   168  	if c.spec().Init == nil {
   169  		return nil
   170  	}
   171  	init := c.spec().Init.GetValue()
   172  	return &init
   173  }
   174  
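        // exposedPorts returns the set of container ports exposed for the
        // task's host-mode published ports.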
   175  func (c *containerConfig) exposedPorts() map[nat.Port]struct{} {
   176  	exposedPorts := make(map[nat.Port]struct{})
   177  	if c.task.Endpoint == nil {
   178  		return exposedPorts
   179  	}
   180  
   181  	for _, portConfig := range c.task.Endpoint.Ports {
   182  		if portConfig.PublishMode != api.PublishModeHost {
   183  			continue
   184  		}
   185  
   186  		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
   187  		exposedPorts[port] = struct{}{}
   188  	}
   189  
   190  	return exposedPorts
   191  }
   192  
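        // config produces the engine container.Config from the task's container
        // spec, combining the spec environment with assigned generic resources
        // and mapping Command/Args onto Entrypoint/Cmd.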
   193  func (c *containerConfig) config() *enginecontainer.Config {
   194  	genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE")
   195  	env := append(c.spec().Env, genericEnvs...)
   196  
   197  	config := &enginecontainer.Config{
   198  		Labels:       c.labels(),
   199  		StopSignal:   c.spec().StopSignal,
   200  		Tty:          c.spec().TTY,
   201  		OpenStdin:    c.spec().OpenStdin,
   202  		User:         c.spec().User,
   203  		Env:          env,
   204  		Hostname:     c.spec().Hostname,
   205  		WorkingDir:   c.spec().Dir,
   206  		Image:        c.image(),
   207  		ExposedPorts: c.exposedPorts(),
   208  		Healthcheck:  c.healthcheck(),
   209  	}
   210  
   211  	if len(c.spec().Command) > 0 {
   212  		// If Command is provided, it replaces the image's Entrypoint, and
   213  		// any Args are appended to Cmd to form the full invocation that the
   214  		// container runs.
   215  		config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
   216  		config.Cmd = append(config.Cmd, c.spec().Args...)
   217  	} else if len(c.spec().Args) > 0 {
   218  		// In this case, we assume the image has an Entrypoint and Args
   219  		// specifies the arguments for that entrypoint.
   220  		config.Cmd = c.spec().Args
   221  	}
   222  
   223  	return config
   224  }
   225  
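        // labels merges the spec labels with the task annotation overrides and
        // then applies the reserved com.docker.swarm system labels on top.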
   226  func (c *containerConfig) labels() map[string]string {
   227  	var (
   228  		system = map[string]string{
   229  			"task":         "", // mark as cluster task
   230  			"task.id":      c.task.ID,
   231  			"task.name":    c.name(),
   232  			"node.id":      c.task.NodeID,
   233  			"service.id":   c.task.ServiceID,
   234  			"service.name": c.task.ServiceAnnotations.Name,
   235  		}
   236  		labels = make(map[string]string)
   237  	)
   238  
   239  	// base labels are those defined in the spec.
   240  	for k, v := range c.spec().Labels {
   241  		labels[k] = v
   242  	}
   243  
   244  	// we then apply the overrides from the task, which may be set via the
   245  	// orchestrator.
   246  	for k, v := range c.task.Annotations.Labels {
   247  		labels[k] = v
   248  	}
   249  
   250  	// finally, we apply the system labels, which override all labels.
   251  	for k, v := range system {
   252  		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
   253  	}
   254  
   255  	return labels
   256  }
   257  
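        // mounts converts the spec's mounts into engine mounts, resolving
        // cluster (CSI) volumes through the provided VolumeGetter.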
   258  func (c *containerConfig) mounts(deps exec.VolumeGetter) []enginemount.Mount {
   259  	var r []enginemount.Mount
   260  	for _, mount := range c.spec().Mounts {
   261  		if mount.Type == api.MountTypeCluster {
   262  			r = append(r, c.convertCSIMount(mount, deps))
   263  		} else {
   264  			r = append(r, convertMount(mount))
   265  		}
   266  	}
   267  	return r
   268  }
   269  
   270  // convertCSIMount matches the CSI mount with the path of the CSI volume.
   271  //
   272  // technically quadratic with respect to the number of CSI mounts, but that
   273  // number shouldn't ever be large enough for quadratic to matter.
   274  //
   275  // TODO(dperny): figure out a scheme for errors? or maybe add code to
   276  // checkMounts?
   277  func (c *containerConfig) convertCSIMount(m api.Mount, deps exec.VolumeGetter) enginemount.Mount {
   278  	var mount enginemount.Mount
   279  
   280  	// these are actually bind mounts
   281  	mount.Type = enginemount.TypeBind
   282  
   283  	for _, attach := range c.task.Volumes {
   284  		if attach.Source == m.Source && attach.Target == m.Target {
   285  			// we should not get an error here, because we should have checked
   286  			// already that the volume is ready
   287  			path, _ := deps.Get(attach.ID)
   288  			mount.Source = path
   289  			mount.Target = m.Target
   290  		}
   291  	}
   292  
   293  	return mount
   294  }
   295  
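        // convertMount translates a swarmkit mount into the equivalent engine
        // mount, including bind, volume, and tmpfs options.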
   296  func convertMount(m api.Mount) enginemount.Mount {
   297  	mount := enginemount.Mount{
   298  		Source:   m.Source,
   299  		Target:   m.Target,
   300  		ReadOnly: m.ReadOnly,
   301  	}
   302  
   303  	switch m.Type {
   304  	case api.MountTypeBind:
   305  		mount.Type = enginemount.TypeBind
   306  	case api.MountTypeVolume:
   307  		mount.Type = enginemount.TypeVolume
   308  	case api.MountTypeTmpfs:
   309  		mount.Type = enginemount.TypeTmpfs
   310  	case api.MountTypeNamedPipe:
   311  		mount.Type = enginemount.TypeNamedPipe
   312  	case api.MountTypeCluster:
   313  		mount.Type = enginemount.TypeCluster
   314  	}
   315  
   316  	if m.BindOptions != nil {
   317  		mount.BindOptions = &enginemount.BindOptions{
   318  			NonRecursive:           m.BindOptions.NonRecursive,
   319  			CreateMountpoint:       m.BindOptions.CreateMountpoint,
   320  			ReadOnlyNonRecursive:   m.BindOptions.ReadOnlyNonRecursive,
   321  			ReadOnlyForceRecursive: m.BindOptions.ReadOnlyForceRecursive,
   322  		}
   323  		switch m.BindOptions.Propagation {
   324  		case api.MountPropagationRPrivate:
   325  			mount.BindOptions.Propagation = enginemount.PropagationRPrivate
   326  		case api.MountPropagationPrivate:
   327  			mount.BindOptions.Propagation = enginemount.PropagationPrivate
   328  		case api.MountPropagationRSlave:
   329  			mount.BindOptions.Propagation = enginemount.PropagationRSlave
   330  		case api.MountPropagationSlave:
   331  			mount.BindOptions.Propagation = enginemount.PropagationSlave
   332  		case api.MountPropagationRShared:
   333  			mount.BindOptions.Propagation = enginemount.PropagationRShared
   334  		case api.MountPropagationShared:
   335  			mount.BindOptions.Propagation = enginemount.PropagationShared
   336  		}
   337  	}
   338  
   339  	if m.VolumeOptions != nil {
   340  		mount.VolumeOptions = &enginemount.VolumeOptions{
   341  			NoCopy: m.VolumeOptions.NoCopy,
   342  		}
   343  		if m.VolumeOptions.Labels != nil {
   344  			mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
   345  			for k, v := range m.VolumeOptions.Labels {
   346  				mount.VolumeOptions.Labels[k] = v
   347  			}
   348  		}
   349  		if m.VolumeOptions.DriverConfig != nil {
   350  			mount.VolumeOptions.DriverConfig = &enginemount.Driver{
   351  				Name: m.VolumeOptions.DriverConfig.Name,
   352  			}
   353  			if m.VolumeOptions.DriverConfig.Options != nil {
   354  				mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options))
   355  				for k, v := range m.VolumeOptions.DriverConfig.Options {
   356  					mount.VolumeOptions.DriverConfig.Options[k] = v
   357  				}
   358  			}
   359  		}
   360  	}
   361  
   362  	if m.TmpfsOptions != nil {
   363  		mount.TmpfsOptions = &enginemount.TmpfsOptions{
   364  			SizeBytes: m.TmpfsOptions.SizeBytes,
   365  			Mode:      m.TmpfsOptions.Mode,
   366  		}
   367  	}
   368  
   369  	return mount
   370  }
   371  
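        // healthcheck converts the spec's healthcheck, if any, into an engine
        // HealthConfig.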
   372  func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
   373  	hcSpec := c.spec().Healthcheck
   374  	if hcSpec == nil {
   375  		return nil
   376  	}
   377  	interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
   378  	timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
   379  	startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod)
   380  	startInterval, _ := gogotypes.DurationFromProto(hcSpec.StartInterval)
   381  	return &enginecontainer.HealthConfig{
   382  		Test:          hcSpec.Test,
   383  		Interval:      interval,
   384  		Timeout:       timeout,
   385  		Retries:       int(hcSpec.Retries),
   386  		StartPeriod:   startPeriod,
   387  		StartInterval: startInterval,
   388  	}
   389  }
   390  
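        // hostConfig produces the engine HostConfig for the task, including
        // resources, mounts, port bindings, DNS settings, privileges, extra
        // hosts, log configuration, and predefined-network mode.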
   391  func (c *containerConfig) hostConfig(deps exec.VolumeGetter) *enginecontainer.HostConfig {
   392  	hc := &enginecontainer.HostConfig{
   393  		Resources:      c.resources(),
   394  		GroupAdd:       c.spec().Groups,
   395  		PortBindings:   c.portBindings(),
   396  		Mounts:         c.mounts(deps),
   397  		ReadonlyRootfs: c.spec().ReadOnly,
   398  		Isolation:      c.isolation(),
   399  		Init:           c.init(),
   400  		Sysctls:        c.spec().Sysctls,
   401  		CapAdd:         c.spec().CapabilityAdd,
   402  		CapDrop:        c.spec().CapabilityDrop,
   403  	}
   404  
   405  	if c.spec().DNSConfig != nil {
   406  		hc.DNS = c.spec().DNSConfig.Nameservers
   407  		hc.DNSSearch = c.spec().DNSConfig.Search
   408  		hc.DNSOptions = c.spec().DNSConfig.Options
   409  	}
   410  
   411  	c.applyPrivileges(hc)
   412  
   413  	// The format of extra hosts on swarmkit is specified in:
   414  	// http://man7.org/linux/man-pages/man5/hosts.5.html
   415  	//    IP_address canonical_hostname [aliases...]
   416  	// However, the format of ExtraHosts in HostConfig is
   417  	//    <host>:<ip>
   418  	// We need to do the conversion here
   419  	// (Alias is ignored for now)
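        	// For example, "10.0.1.5 myhost" is converted to "myhost:10.0.1.5".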
   420  	for _, entry := range c.spec().Hosts {
   421  		parts := strings.Fields(entry)
   422  		if len(parts) > 1 {
   423  			hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0]))
   424  		}
   425  	}
   426  
   427  	if c.task.LogDriver != nil {
   428  		hc.LogConfig = enginecontainer.LogConfig{
   429  			Type:   c.task.LogDriver.Name,
   430  			Config: c.task.LogDriver.Options,
   431  		}
   432  	}
   433  
   434  	if len(c.task.Networks) > 0 {
   435  		labels := c.task.Networks[0].Network.Spec.Annotations.Labels
   436  		name := c.task.Networks[0].Network.Spec.Annotations.Name
   437  		if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" {
   438  			hc.NetworkMode = enginecontainer.NetworkMode(name)
   439  		}
   440  	}
   441  
   442  	return hc
   443  }
   444  
   445  // volumeCreateRequest handles the case of volumes that are defined inside a service Mount.
   446  func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volume.CreateOptions {
   447  	var (
   448  		driverName string
   449  		driverOpts map[string]string
   450  		labels     map[string]string
   451  	)
   452  
   453  	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
   454  		driverName = mount.VolumeOptions.DriverConfig.Name
   455  		driverOpts = mount.VolumeOptions.DriverConfig.Options
   456  		labels = mount.VolumeOptions.Labels
   457  	}
   458  
   459  	if mount.VolumeOptions != nil {
   460  		return &volume.CreateOptions{
   461  			Name:       mount.Source,
   462  			Driver:     driverName,
   463  			DriverOpts: driverOpts,
   464  			Labels:     labels,
   465  		}
   466  	}
   467  	return nil
   468  }
   469  
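        // resources maps the task's resource limits (pids, ulimits, memory,
        // CPU) onto engine Resources; unset limits are left to engine defaults.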
   470  func (c *containerConfig) resources() enginecontainer.Resources {
   471  	resources := enginecontainer.Resources{}
   472  
   473  	// set pids limit
   474  	pidsLimit := c.spec().PidsLimit
   475  	if pidsLimit > 0 {
   476  		resources.PidsLimit = &pidsLimit
   477  	}
   478  
   479  	resources.Ulimits = make([]*units.Ulimit, len(c.spec().Ulimits))
   480  	for i, ulimit := range c.spec().Ulimits {
   481  		resources.Ulimits[i] = &units.Ulimit{
   482  			Name: ulimit.Name,
   483  			Soft: ulimit.Soft,
   484  			Hard: ulimit.Hard,
   485  		}
   486  	}
   487  
   488  	// If no limits are specified let the engine use its defaults.
   489  	//
   490  	// TODO(aluzzardi): We might want to set some limits anyway otherwise
   491  	// "unlimited" tasks will step over the reservation of other tasks.
   492  	r := c.task.Spec.Resources
   493  	if r == nil || r.Limits == nil {
   494  		return resources
   495  	}
   496  
   497  	if r.Limits.MemoryBytes > 0 {
   498  		resources.Memory = r.Limits.MemoryBytes
   499  	}
   500  
   501  	if r.Limits.NanoCPUs > 0 {
   502  		resources.NanoCPUs = r.Limits.NanoCPUs
   503  	}
   504  
   505  	return resources
   506  }
   507  
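        // createNetworkingConfig builds the endpoint settings for each of the
        // task's network attachments, keyed by network name.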
   508  func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
   509  	var networks []*api.NetworkAttachment
   510  	if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil {
   511  		networks = c.task.Networks
   512  	}
   513  
   514  	epConfig := make(map[string]*network.EndpointSettings)
   515  	for _, na := range networks {
   516  		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b)
   517  	}
   518  
   519  	return &network.NetworkingConfig{EndpointsConfig: epConfig}
   520  }
   521  
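        // getEndpointConfig derives the endpoint settings for a single network
        // attachment, picking IPv4/IPv6 addresses from the attachment and
        // resolving predefined networks to their local network ID.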
   522  func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings {
   523  	var ipv4, ipv6 string
   524  	for _, addr := range na.Addresses {
   525  		ip, _, err := net.ParseCIDR(addr)
   526  		if err != nil {
   527  			continue
   528  		}
   529  
   530  		if ip.To4() != nil {
   531  			ipv4 = ip.String()
   532  			continue
   533  		}
   534  
   535  		if ip.To16() != nil {
   536  			ipv6 = ip.String()
   537  		}
   538  	}
   539  
   540  	n := &network.EndpointSettings{
   541  		NetworkID: na.Network.ID,
   542  		IPAMConfig: &network.EndpointIPAMConfig{
   543  			IPv4Address: ipv4,
   544  			IPv6Address: ipv6,
   545  		},
   546  		DriverOpts: na.DriverAttachmentOpts,
   547  	}
   548  	if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" {
   549  		if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil {
   550  			n.NetworkID = ln.ID()
   551  		}
   552  	}
   553  	return n
   554  }
   555  
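        // virtualIP returns the task endpoint's virtual IP on the given
        // network, or an empty string if there is none.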
   556  func (c *containerConfig) virtualIP(networkID string) string {
   557  	if c.task.Endpoint == nil {
   558  		return ""
   559  	}
   560  
   561  	for _, eVip := range c.task.Endpoint.VirtualIPs {
   562  		// We only support IPv4 VIPs for now.
   563  		if eVip.NetworkID == networkID {
   564  			vip, _, err := net.ParseCIDR(eVip.Addr)
   565  			if err != nil {
   566  				return ""
   567  			}
   568  
   569  			return vip.String()
   570  		}
   571  	}
   572  
   573  	return ""
   574  }
   575  
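        // serviceConfig assembles the cluster service configuration (virtual
        // addresses, aliases, and ingress-published ports) for the task, or nil
        // if the task has no networks.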
   576  func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
   577  	if len(c.task.Networks) == 0 {
   578  		return nil
   579  	}
   580  
   581  	log.G(context.TODO()).Debugf("Creating service config in agent for t = %+v", c.task)
   582  	svcCfg := &clustertypes.ServiceConfig{
   583  		Name:             c.task.ServiceAnnotations.Name,
   584  		Aliases:          make(map[string][]string),
   585  		ID:               c.task.ServiceID,
   586  		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
   587  	}
   588  
   589  	for _, na := range c.task.Networks {
   590  		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
   591  			// We support only IPv4 virtual IP for now.
   592  			IPv4: c.virtualIP(na.Network.ID),
   593  		}
   594  		if len(na.Aliases) > 0 {
   595  			svcCfg.Aliases[na.Network.ID] = na.Aliases
   596  		}
   597  	}
   598  
   599  	if c.task.Endpoint != nil {
   600  		for _, ePort := range c.task.Endpoint.Ports {
   601  			if ePort.PublishMode != api.PublishModeIngress {
   602  				continue
   603  			}
   604  
   605  			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
   606  				Name:          ePort.Name,
   607  				Protocol:      int32(ePort.Protocol),
   608  				TargetPort:    ePort.TargetPort,
   609  				PublishedPort: ePort.PublishedPort,
   610  			})
   611  		}
   612  	}
   613  
   614  	return svcCfg
   615  }
   616  
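        // networkCreateRequest builds the request used to create the named
        // network on this node from the task's attachment for that network.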
   617  func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
   618  	na, ok := c.networksAttachments[name]
   619  	if !ok {
   620  		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
   621  	}
   622  
   623  	options := types.NetworkCreate{
   624  		// ID:     na.Network.ID,
   625  		Labels:     na.Network.Spec.Annotations.Labels,
   626  		Internal:   na.Network.Spec.Internal,
   627  		Attachable: na.Network.Spec.Attachable,
   628  		Ingress:    convert.IsIngressNetwork(na.Network),
   629  		EnableIPv6: na.Network.Spec.Ipv6Enabled,
   630  		Scope:      scope.Swarm,
   631  	}
   632  
   633  	if na.Network.Spec.GetNetwork() != "" {
   634  		options.ConfigFrom = &network.ConfigReference{
   635  			Network: na.Network.Spec.GetNetwork(),
   636  		}
   637  	}
   638  
   639  	if na.Network.DriverState != nil {
   640  		options.Driver = na.Network.DriverState.Name
   641  		options.Options = na.Network.DriverState.Options
   642  	}
   643  	if na.Network.IPAM != nil {
   644  		options.IPAM = &network.IPAM{
   645  			Driver:  na.Network.IPAM.Driver.Name,
   646  			Options: na.Network.IPAM.Driver.Options,
   647  		}
   648  		for _, ic := range na.Network.IPAM.Configs {
   649  			c := network.IPAMConfig{
   650  				Subnet:  ic.Subnet,
   651  				IPRange: ic.Range,
   652  				Gateway: ic.Gateway,
   653  			}
   654  			options.IPAM.Config = append(options.IPAM.Config, c)
   655  		}
   656  	}
   657  
   658  	return clustertypes.NetworkCreateRequest{
   659  		ID: na.Network.ID,
   660  		NetworkCreateRequest: types.NetworkCreateRequest{
   661  			Name:          name,
   662  			NetworkCreate: options,
   663  		},
   664  	}, nil
   665  }
   666  
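        // applyPrivileges translates the spec's privilege settings (credential
        // spec, SELinux, seccomp, AppArmor, no-new-privileges) into SecurityOpt
        // entries on the host config.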
   667  func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) {
   668  	privileges := c.spec().Privileges
   669  	if privileges == nil {
   670  		return
   671  	}
   672  
   673  	credentials := privileges.CredentialSpec
   674  	if credentials != nil {
   675  		switch credentials.Source.(type) {
   676  		case *api.Privileges_CredentialSpec_File:
   677  			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile())
   678  		case *api.Privileges_CredentialSpec_Registry:
   679  			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry())
   680  		case *api.Privileges_CredentialSpec_Config:
   681  			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=config://"+credentials.GetConfig())
   682  		}
   683  	}
   684  
   685  	selinux := privileges.SELinuxContext
   686  	if selinux != nil {
   687  		if selinux.Disable {
   688  			hc.SecurityOpt = append(hc.SecurityOpt, "label=disable")
   689  		}
   690  		if selinux.User != "" {
   691  			hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User)
   692  		}
   693  		if selinux.Role != "" {
   694  			hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role)
   695  		}
   696  		if selinux.Level != "" {
   697  			hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level)
   698  		}
   699  		if selinux.Type != "" {
   700  			hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type)
   701  		}
   702  	}
   703  
   704  	// assign to a local variable to keep the following lines shorter and easier to read
   705  	if seccomp := privileges.Seccomp; seccomp != nil {
   706  		switch seccomp.Mode {
   707  		// case api.Privileges_SeccompOpts_DEFAULT:
   708  		//   if the setting is default, nothing needs to be set here. we leave
   709  		//   the option empty.
   710  		case api.Privileges_SeccompOpts_UNCONFINED:
   711  			hc.SecurityOpt = append(hc.SecurityOpt, "seccomp=unconfined")
   712  		case api.Privileges_SeccompOpts_CUSTOM:
   713  			// Profile is bytes, but those bytes are actually a string. This is
   714  			// basically verbatim what happens in the cli after a file is read.
   715  			hc.SecurityOpt = append(hc.SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp.Profile))
   716  		}
   717  	}
   718  
   719  	// if the setting is DEFAULT, then nothing to be done. If it's DISABLED,
   720  	// we set that. Custom not supported yet. When custom *is* supported, make
   721  	// it look like the above.
   722  	if apparmor := privileges.Apparmor; apparmor != nil && apparmor.Mode == api.Privileges_AppArmorOpts_DISABLED {
   723  		hc.SecurityOpt = append(hc.SecurityOpt, "apparmor=unconfined")
   724  	}
   725  
   726  	if privileges.NoNewPrivileges {
   727  		hc.SecurityOpt = append(hc.SecurityOpt, "no-new-privileges=true")
   728  	}
   729  }
   730  
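        // eventFilter returns the filter used to watch engine events for this
        // task's container.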
   731  func (c *containerConfig) eventFilter() filters.Args {
   732  	return filters.NewArgs(
   733  		filters.Arg("type", string(events.ContainerEventType)),
   734  		filters.Arg("name", c.name()),
   735  		filters.Arg("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)),
   736  	)
   737  }