github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/daemon/cluster/executor/container/container.go

package container // import "github.com/docker/docker/daemon/cluster/executor/container"

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	enginecontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	enginemount "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	clustertypes "github.com/docker/docker/daemon/cluster/provider"
	netconst "github.com/docker/docker/libnetwork/datastore"
	"github.com/docker/go-connections/nat"
	"github.com/docker/go-units"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/moby/swarmkit/v2/agent/exec"
	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/api/genericresource"
	"github.com/moby/swarmkit/v2/template"
	"github.com/sirupsen/logrus"
)

const (
	// systemLabelPrefix represents the reserved namespace for system labels.
	systemLabelPrefix = "com.docker.swarm"
)

// containerConfig converts task properties into Docker container-compatible
// components.
type containerConfig struct {
	task                *api.Task
	networksAttachments map[string]*api.NetworkAttachment
}

// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
	var c containerConfig
	return &c, c.setTask(t, node)
}

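// setTask validates the task and stores it on the config, indexing the task's
// networks by name and expanding any templated fields in the container spec
// against the node description.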
func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
	if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
		return exec.ErrRuntimeUnsupported
	}

	container := t.Spec.GetContainer()
	if container != nil {
		if container.Image == "" {
			return ErrImageRequired
		}

		if err := validateMounts(container.Mounts); err != nil {
			return err
		}
	}

	// index the networks by name
	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
	for _, attachment := range t.Networks {
		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
	}

	c.task = t

	if t.Spec.GetContainer() != nil {
		preparedSpec, err := template.ExpandContainerSpec(node, t)
		if err != nil {
			return err
		}
		c.task.Spec.Runtime = &api.TaskSpec_Container{
			Container: preparedSpec,
		}
	}

	return nil
}

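// networkAttachmentContainerID returns the ID of the container that a
// network-attachment task targets, or "" for regular container tasks.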
func (c *containerConfig) networkAttachmentContainerID() string {
	attachment := c.task.Spec.GetAttachment()
	if attachment == nil {
		return ""
	}

	return attachment.ContainerID
}

func (c *containerConfig) taskID() string {
	return c.task.ID
}

func (c *containerConfig) spec() *api.ContainerSpec {
	return c.task.Spec.GetContainer()
}

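// nameOrID returns the container name for container tasks, or the target
// container ID for network-attachment tasks.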
func (c *containerConfig) nameOrID() string {
	if c.task.Spec.GetContainer() != nil {
		return c.name()
	}

	return c.networkAttachmentContainerID()
}

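// name returns the name to assign to the task's container: the
// orchestrator-set annotation name if present, otherwise the generated
// "<service>.<slot>.<taskID>" form.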
func (c *containerConfig) name() string {
	if c.task.Annotations.Name != "" {
		// If set, use the task's Annotations.Name field, which is set by the
		// orchestrator.
		return c.task.Annotations.Name
	}

	slot := fmt.Sprint(c.task.Slot)
	if c.task.Slot == 0 {
		// Global-mode tasks have no slot; use the node ID in its place.
		slot = c.task.NodeID
	}

	// Fall back to the generated <service>.<slot>.<taskID> name.
	return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID)
}

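// image returns the image reference from the spec, normalized to its familiar
// tagged form; the raw string is returned if it cannot be parsed.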
func (c *containerConfig) image() string {
	raw := c.spec().Image
	ref, err := reference.ParseNormalizedNamed(raw)
	if err != nil {
		return raw
	}
	return reference.FamiliarString(reference.TagNameOnly(ref))
}

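// portBindings returns the nat.PortMap for the task's endpoint ports that are
// published in host mode; ports published in ingress mode are skipped.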
func (c *containerConfig) portBindings() nat.PortMap {
	portBindings := nat.PortMap{}
	if c.task.Endpoint == nil {
		return portBindings
	}

	for _, portConfig := range c.task.Endpoint.Ports {
		if portConfig.PublishMode != api.PublishModeHost {
			continue
		}

		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
		binding := []nat.PortBinding{
			{},
		}

		if portConfig.PublishedPort != 0 {
			binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort))
		}
		portBindings[port] = binding
	}

	return portBindings
}

func (c *containerConfig) isolation() enginecontainer.Isolation {
	return convert.IsolationFromGRPC(c.spec().Isolation)
}

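// init returns the spec's Init setting as a *bool, or nil when unset so that
// the engine falls back to its daemon-wide default.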
func (c *containerConfig) init() *bool {
	if c.spec().Init == nil {
		return nil
	}
	init := c.spec().Init.GetValue()
	return &init
}

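// exposedPorts returns the set of ports to expose on the container, again
// limited to endpoint ports published in host mode.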
func (c *containerConfig) exposedPorts() map[nat.Port]struct{} {
	exposedPorts := make(map[nat.Port]struct{})
	if c.task.Endpoint == nil {
		return exposedPorts
	}

	for _, portConfig := range c.task.Endpoint.Ports {
		if portConfig.PublishMode != api.PublishModeHost {
			continue
		}

		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
		exposedPorts[port] = struct{}{}
	}

	return exposedPorts
}

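// config builds the engine's container.Config from the task's container spec.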
func (c *containerConfig) config() *enginecontainer.Config {
	genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE")
	env := append(c.spec().Env, genericEnvs...)

	config := &enginecontainer.Config{
		Labels:       c.labels(),
		StopSignal:   c.spec().StopSignal,
		Tty:          c.spec().TTY,
		OpenStdin:    c.spec().OpenStdin,
		User:         c.spec().User,
		Env:          env,
		Hostname:     c.spec().Hostname,
		WorkingDir:   c.spec().Dir,
		Image:        c.image(),
		ExposedPorts: c.exposedPorts(),
		Healthcheck:  c.healthcheck(),
	}

	if len(c.spec().Command) > 0 {
		// If Command is provided, we replace the whole invocation: Command
		// overrides the image's Entrypoint, and Args (if any) become the Cmd.
		config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
		config.Cmd = append(config.Cmd, c.spec().Args...)
	} else if len(c.spec().Args) > 0 {
		// In this case, we assume the image has an Entrypoint and Args
		// specifies the arguments for that entrypoint.
		config.Cmd = c.spec().Args
	}

	return config
}

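// labels merges the spec labels, the task annotation labels, and the reserved
// com.docker.swarm system labels, in increasing order of precedence.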
func (c *containerConfig) labels() map[string]string {
	var (
		system = map[string]string{
			"task":         "", // mark as cluster task
			"task.id":      c.task.ID,
			"task.name":    c.name(),
			"node.id":      c.task.NodeID,
			"service.id":   c.task.ServiceID,
			"service.name": c.task.ServiceAnnotations.Name,
		}
		labels = make(map[string]string)
	)

	// base labels are those defined in the spec.
	for k, v := range c.spec().Labels {
		labels[k] = v
	}

	// we then apply the overrides from the task, which may be set via the
	// orchestrator.
	for k, v := range c.task.Annotations.Labels {
		labels[k] = v
	}

	// finally, we apply the system labels, which override all labels.
	for k, v := range system {
		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
	}

	return labels
}

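// mounts converts the spec's mounts into engine mounts, resolving cluster
// (CSI) mounts via the volume dependency getter.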
func (c *containerConfig) mounts(deps exec.VolumeGetter) []enginemount.Mount {
	var r []enginemount.Mount
	for _, mount := range c.spec().Mounts {
		if mount.Type == api.MountTypeCluster {
			r = append(r, c.convertCSIMount(mount, deps))
		} else {
			r = append(r, convertMount(mount))
		}
	}
	return r
}

// convertCSIMount matches the CSI mount with the path of the CSI volume.
//
// Technically quadratic with respect to the number of CSI mounts, but that
// number shouldn't ever be large enough for quadratic to matter.
//
// TODO(dperny): figure out a scheme for errors? or maybe add code to
// checkMounts?
func (c *containerConfig) convertCSIMount(m api.Mount, deps exec.VolumeGetter) enginemount.Mount {
	var mount enginemount.Mount

	// These are actually bind mounts: the volume is published to a path on
	// the host, and that path is bind-mounted into the container.
	mount.Type = enginemount.TypeBind

	for _, attach := range c.task.Volumes {
		if attach.Source == m.Source && attach.Target == m.Target {
			// We should not get an error here, because we should have already
			// checked that the volume is ready.
			path, _ := deps.Get(attach.ID)
			mount.Source = path
			mount.Target = m.Target
		}
	}

	return mount
}

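// convertMount translates a swarmkit api.Mount into the engine's mount type,
// copying over bind, volume, and tmpfs options as applicable.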
func convertMount(m api.Mount) enginemount.Mount {
	mount := enginemount.Mount{
		Source:   m.Source,
		Target:   m.Target,
		ReadOnly: m.ReadOnly,
	}

	switch m.Type {
	case api.MountTypeBind:
		mount.Type = enginemount.TypeBind
	case api.MountTypeVolume:
		mount.Type = enginemount.TypeVolume
	case api.MountTypeTmpfs:
		mount.Type = enginemount.TypeTmpfs
	case api.MountTypeNamedPipe:
		mount.Type = enginemount.TypeNamedPipe
	case api.MountTypeCluster:
		mount.Type = enginemount.TypeCluster
	}

	if m.BindOptions != nil {
		mount.BindOptions = &enginemount.BindOptions{
			NonRecursive: m.BindOptions.NonRecursive,
		}
		switch m.BindOptions.Propagation {
		case api.MountPropagationRPrivate:
			mount.BindOptions.Propagation = enginemount.PropagationRPrivate
		case api.MountPropagationPrivate:
			mount.BindOptions.Propagation = enginemount.PropagationPrivate
		case api.MountPropagationRSlave:
			mount.BindOptions.Propagation = enginemount.PropagationRSlave
		case api.MountPropagationSlave:
			mount.BindOptions.Propagation = enginemount.PropagationSlave
		case api.MountPropagationRShared:
			mount.BindOptions.Propagation = enginemount.PropagationRShared
		case api.MountPropagationShared:
			mount.BindOptions.Propagation = enginemount.PropagationShared
		}
	}

	if m.VolumeOptions != nil {
		mount.VolumeOptions = &enginemount.VolumeOptions{
			NoCopy: m.VolumeOptions.NoCopy,
		}
		if m.VolumeOptions.Labels != nil {
			mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
			for k, v := range m.VolumeOptions.Labels {
				mount.VolumeOptions.Labels[k] = v
			}
		}
		if m.VolumeOptions.DriverConfig != nil {
			mount.VolumeOptions.DriverConfig = &enginemount.Driver{
				Name: m.VolumeOptions.DriverConfig.Name,
			}
			if m.VolumeOptions.DriverConfig.Options != nil {
				mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options))
				for k, v := range m.VolumeOptions.DriverConfig.Options {
					mount.VolumeOptions.DriverConfig.Options[k] = v
				}
			}
		}
	}

	if m.TmpfsOptions != nil {
		mount.TmpfsOptions = &enginemount.TmpfsOptions{
			SizeBytes: m.TmpfsOptions.SizeBytes,
			Mode:      m.TmpfsOptions.Mode,
		}
	}

	return mount
}

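// healthcheck converts the spec's healthcheck into the engine's HealthConfig,
// or returns nil when no healthcheck is specified.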
func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
	hcSpec := c.spec().Healthcheck
	if hcSpec == nil {
		return nil
	}
	interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
	timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
	startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod)
	return &enginecontainer.HealthConfig{
		Test:        hcSpec.Test,
		Interval:    interval,
		Timeout:     timeout,
		Retries:     int(hcSpec.Retries),
		StartPeriod: startPeriod,
	}
}

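// hostConfig builds the engine's HostConfig for the task, covering resources,
// mounts, DNS, privileges, extra hosts, logging, and predefined networks.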
func (c *containerConfig) hostConfig(deps exec.VolumeGetter) *enginecontainer.HostConfig {
	hc := &enginecontainer.HostConfig{
		Resources:      c.resources(),
		GroupAdd:       c.spec().Groups,
		PortBindings:   c.portBindings(),
		Mounts:         c.mounts(deps),
		ReadonlyRootfs: c.spec().ReadOnly,
		Isolation:      c.isolation(),
		Init:           c.init(),
		Sysctls:        c.spec().Sysctls,
		CapAdd:         c.spec().CapabilityAdd,
		CapDrop:        c.spec().CapabilityDrop,
	}

	if c.spec().DNSConfig != nil {
		hc.DNS = c.spec().DNSConfig.Nameservers
		hc.DNSSearch = c.spec().DNSConfig.Search
		hc.DNSOptions = c.spec().DNSConfig.Options
	}

	c.applyPrivileges(hc)

	// The format of extra hosts in swarmkit follows hosts(5), as specified in
	// http://man7.org/linux/man-pages/man5/hosts.5.html:
	//    IP_address canonical_hostname [aliases...]
	// However, the format of ExtraHosts in HostConfig is
	//    <host>:<ip>
	// so we convert between the two here. Aliases are ignored for now.
	for _, entry := range c.spec().Hosts {
		parts := strings.Fields(entry)
		if len(parts) > 1 {
			hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0]))
		}
	}

	if c.task.LogDriver != nil {
		hc.LogConfig = enginecontainer.LogConfig{
			Type:   c.task.LogDriver.Name,
			Config: c.task.LogDriver.Options,
		}
	}

	if len(c.task.Networks) > 0 {
		labels := c.task.Networks[0].Network.Spec.Annotations.Labels
		name := c.task.Networks[0].Network.Spec.Annotations.Name
		if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" {
			hc.NetworkMode = enginecontainer.NetworkMode(name)
		}
	}

	return hc
}

// volumeCreateRequest handles the case of volumes that are defined inside a
// service Mount.
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volume.CreateOptions {
	var (
		driverName string
		driverOpts map[string]string
		labels     map[string]string
	)

	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
		driverName = mount.VolumeOptions.DriverConfig.Name
		driverOpts = mount.VolumeOptions.DriverConfig.Options
		labels = mount.VolumeOptions.Labels
	}

	if mount.VolumeOptions != nil {
		return &volume.CreateOptions{
			Name:       mount.Source,
			Driver:     driverName,
			DriverOpts: driverOpts,
			Labels:     labels,
		}
	}
	return nil
}

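// resources translates the task's resource limits (pids, ulimits, memory, and
// CPU) into the engine's Resources struct.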
func (c *containerConfig) resources() enginecontainer.Resources {
	resources := enginecontainer.Resources{}

	// set pids limit
	pidsLimit := c.spec().PidsLimit
	if pidsLimit > 0 {
		resources.PidsLimit = &pidsLimit
	}

	resources.Ulimits = make([]*units.Ulimit, len(c.spec().Ulimits))
	for i, ulimit := range c.spec().Ulimits {
		resources.Ulimits[i] = &units.Ulimit{
			Name: ulimit.Name,
			Soft: ulimit.Soft,
			Hard: ulimit.Hard,
		}
	}

	// If no limits are specified, let the engine use its defaults.
	//
	// TODO(aluzzardi): We might want to set some limits anyway, otherwise
	// "unlimited" tasks will step over the reservation of other tasks.
	r := c.task.Spec.Resources
	if r == nil || r.Limits == nil {
		return resources
	}

	if r.Limits.MemoryBytes > 0 {
		resources.Memory = r.Limits.MemoryBytes
	}

	if r.Limits.NanoCPUs > 0 {
		resources.NanoCPUs = r.Limits.NanoCPUs
	}

	return resources
}

// createNetworkingConfig returns the networking configuration used at
// container create time. The Docker daemon supports just one network during
// container create.
func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil {
		networks = c.task.Networks
	}

	epConfig := make(map[string]*network.EndpointSettings)
	if len(networks) > 0 {
		epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b)
	}

	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

// connectNetworkingConfig returns the configuration for the networks attached
// after container create, via "docker network connect".
//
// TODO: Merge this function with createNetworkingConfig after the daemon
// supports multiple networks in container create.
func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil {
		networks = c.task.Networks
	}
	// The first network is used during container create; the others are
	// attached with "docker network connect".
	if len(networks) < 2 {
		return nil
	}

	epConfig := make(map[string]*network.EndpointSettings)
	for _, na := range networks[1:] {
		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b)
	}
	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

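// getEndpointConfig builds the endpoint settings for a network attachment,
// picking an IPv4 and an IPv6 address from the attachment's addresses and
// resolving predefined networks to their local network ID.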
func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings {
	var ipv4, ipv6 string
	for _, addr := range na.Addresses {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue
		}

		if ip.To4() != nil {
			ipv4 = ip.String()
			continue
		}

		if ip.To16() != nil {
			ipv6 = ip.String()
		}
	}

	n := &network.EndpointSettings{
		NetworkID: na.Network.ID,
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: ipv4,
			IPv6Address: ipv6,
		},
		DriverOpts: na.DriverAttachmentOpts,
	}
	if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" {
		if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil {
			n.NetworkID = ln.ID()
		}
	}
	return n
}

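// virtualIP returns the task's virtual IP on the given network, or "" if the
// endpoint has no VIP there.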
func (c *containerConfig) virtualIP(networkID string) string {
	if c.task.Endpoint == nil {
		return ""
	}

	for _, eVip := range c.task.Endpoint.VirtualIPs {
		// We only support IPv4 VIPs for now.
		if eVip.NetworkID == networkID {
			vip, _, err := net.ParseCIDR(eVip.Addr)
			if err != nil {
				return ""
			}

			return vip.String()
		}
	}

	return ""
}

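// serviceConfig builds the cluster service config (name, aliases, virtual
// addresses, and ingress-published ports) for the task, or returns nil when
// the task has no networks.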
func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
	if len(c.task.Networks) == 0 {
		return nil
	}

	logrus.Debugf("Creating service config in agent for t = %+v", c.task)
	svcCfg := &clustertypes.ServiceConfig{
		Name:             c.task.ServiceAnnotations.Name,
		Aliases:          make(map[string][]string),
		ID:               c.task.ServiceID,
		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
	}

	for _, na := range c.task.Networks {
		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
			// We support only IPv4 virtual IPs for now.
			IPv4: c.virtualIP(na.Network.ID),
		}
		if len(na.Aliases) > 0 {
			svcCfg.Aliases[na.Network.ID] = na.Aliases
		}
	}

	if c.task.Endpoint != nil {
		for _, ePort := range c.task.Endpoint.Ports {
			if ePort.PublishMode != api.PublishModeIngress {
				continue
			}

			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
				Name:          ePort.Name,
				Protocol:      int32(ePort.Protocol),
				TargetPort:    ePort.TargetPort,
				PublishedPort: ePort.PublishedPort,
			})
		}
	}

	return svcCfg
}

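// networkCreateRequest builds the request used to create the named network on
// this node, translating the swarmkit network spec (driver, IPAM, labels) into
// the engine's network create options.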
func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
	na, ok := c.networksAttachments[name]
	if !ok {
		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
	}

	options := types.NetworkCreate{
		// ID:     na.Network.ID,
		Labels:         na.Network.Spec.Annotations.Labels,
		Internal:       na.Network.Spec.Internal,
		Attachable:     na.Network.Spec.Attachable,
		Ingress:        convert.IsIngressNetwork(na.Network),
		EnableIPv6:     na.Network.Spec.Ipv6Enabled,
		CheckDuplicate: true,
		Scope:          netconst.SwarmScope,
	}

	if na.Network.Spec.GetNetwork() != "" {
		options.ConfigFrom = &network.ConfigReference{
			Network: na.Network.Spec.GetNetwork(),
		}
	}

	if na.Network.DriverState != nil {
		options.Driver = na.Network.DriverState.Name
		options.Options = na.Network.DriverState.Options
	}
	if na.Network.IPAM != nil {
		options.IPAM = &network.IPAM{
			Driver:  na.Network.IPAM.Driver.Name,
			Options: na.Network.IPAM.Driver.Options,
		}
		for _, ic := range na.Network.IPAM.Configs {
			c := network.IPAMConfig{
				Subnet:  ic.Subnet,
				IPRange: ic.Range,
				Gateway: ic.Gateway,
			}
			options.IPAM.Config = append(options.IPAM.Config, c)
		}
	}

	return clustertypes.NetworkCreateRequest{
		ID: na.Network.ID,
		NetworkCreateRequest: types.NetworkCreateRequest{
			Name:          name,
			NetworkCreate: options,
		},
	}, nil
}

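// applyPrivileges translates the spec's privilege settings (credential specs
// and SELinux options) into security options on the host config.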
func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) {
	privileges := c.spec().Privileges
	if privileges == nil {
		return
	}

	credentials := privileges.CredentialSpec
	if credentials != nil {
		switch credentials.Source.(type) {
		case *api.Privileges_CredentialSpec_File:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile())
		case *api.Privileges_CredentialSpec_Registry:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry())
		case *api.Privileges_CredentialSpec_Config:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=config://"+credentials.GetConfig())
		}
	}

	selinux := privileges.SELinuxContext
	if selinux != nil {
		if selinux.Disable {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=disable")
		}
		if selinux.User != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User)
		}
		if selinux.Role != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role)
		}
		if selinux.Level != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level)
		}
		if selinux.Type != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type)
		}
	}
}

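// eventFilter returns the filter used to watch the daemon's event stream for
// events about this task's container.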
func (c containerConfig) eventFilter() filters.Args {
	filter := filters.NewArgs()
	filter.Add("type", events.ContainerEventType)
	filter.Add("name", c.name())
	filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
	return filter
}