github.com/noxiouz/docker@v0.7.3-0.20160629055221-3d231c78e8c5/daemon/cluster/executor/container/container.go

package container

import (
	"errors"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"

	clustertypes "github.com/docker/docker/daemon/cluster/provider"
	"github.com/docker/docker/reference"
	"github.com/docker/engine-api/types"
	enginecontainer "github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/network"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
)

const (
	// Explicitly use the kernel's default CFS quota period of 100ms.
	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
	cpuQuotaPeriod = 100 * time.Millisecond

	// systemLabelPrefix represents the reserved namespace for system labels.
	systemLabelPrefix = "com.docker.swarm"
)

// containerConfig converts task properties into docker container compatible
// components.
type containerConfig struct {
	task                *api.Task
	networksAttachments map[string]*api.NetworkAttachment
}

// newContainerConfig returns a validated container config. If this function
// returns without error, none of the config's methods should return an error.
func newContainerConfig(t *api.Task) (*containerConfig, error) {
	var c containerConfig
	return &c, c.setTask(t)
}
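
// A minimal usage sketch from a hypothetical caller:
//
//	cfg, err := newContainerConfig(task)
//	if err != nil {
//		return err
//	}
//	containerCfg, hostCfg := cfg.config(), cfg.hostConfig()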

func (c *containerConfig) setTask(t *api.Task) error {
	container := t.Spec.GetContainer()
	if container == nil {
		return exec.ErrRuntimeUnsupported
	}

	if container.Image == "" {
		return ErrImageRequired
	}

	// index the networks by name
	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
	for _, attachment := range t.Networks {
		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
	}

	c.task = t
	return nil
}

func (c *containerConfig) endpoint() *api.Endpoint {
	return c.task.Endpoint
}

func (c *containerConfig) spec() *api.ContainerSpec {
	return c.task.Spec.GetContainer()
}

func (c *containerConfig) name() string {
	if c.task.Annotations.Name != "" {
		// if set, use the container Annotations.Name field, set in the orchestrator.
		return c.task.Annotations.Name
	}

	// fall back to service.slot.id.
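	// For example (hypothetical values): service "web", slot 1, task ID "8p2vbf" yields "web.1.8p2vbf".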
	return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".")
}

func (c *containerConfig) image() string {
	raw := c.spec().Image
	ref, err := reference.ParseNamed(raw)
	if err != nil {
		return raw
	}
	return reference.WithDefaultTag(ref).String()
}

func (c *containerConfig) volumes() map[string]struct{} {
	r := make(map[string]struct{})

	for _, mount := range c.spec().Mounts {
		// pick off all the volume mounts.
		if mount.Type != api.MountTypeVolume {
			continue
		}

		r[fmt.Sprintf("%s:%s", mount.Target, getMountMask(&mount))] = struct{}{}
	}
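	// Each key takes the form "target:mask", e.g. "/var/lib/data:rw" for a writable volume (hypothetical path).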

	return r
}

func (c *containerConfig) config() *enginecontainer.Config {
	config := &enginecontainer.Config{
		Labels:     c.labels(),
		User:       c.spec().User,
		Env:        c.spec().Env,
		WorkingDir: c.spec().Dir,
		Image:      c.image(),
		Volumes:    c.volumes(),
	}

	if len(c.spec().Command) > 0 {
		// If Command is provided, we replace the whole invocation with Command
		// by replacing Entrypoint and specifying Cmd. Args is ignored in this
		// case.
		config.Entrypoint = append(config.Entrypoint, c.spec().Command[0])
		config.Cmd = append(config.Cmd, c.spec().Command[1:]...)
	} else if len(c.spec().Args) > 0 {
		// In this case, we assume the image has an Entrypoint and Args
		// specifies the arguments for that entrypoint.
		config.Cmd = c.spec().Args
	}
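	// Illustrative example (hypothetical values): Command = ["nginx", "-g", "daemon off;"]
	// becomes Entrypoint = ["nginx"] and Cmd = ["-g", "daemon off;"], while
	// Args = ["-g", "daemon off;"] with no Command sets only Cmd and keeps the
	// image's original Entrypoint.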

	return config
}

func (c *containerConfig) labels() map[string]string {
	var (
		system = map[string]string{
			"task":         "", // mark as cluster task
			"task.id":      c.task.ID,
			"task.name":    fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot),
			"node.id":      c.task.NodeID,
			"service.id":   c.task.ServiceID,
			"service.name": c.task.ServiceAnnotations.Name,
		}
		labels = make(map[string]string)
	)

	// base labels are those defined in the spec.
	for k, v := range c.spec().Labels {
		labels[k] = v
	}

	// we then apply the overrides from the task, which may be set via the
	// orchestrator.
	for k, v := range c.task.Annotations.Labels {
		labels[k] = v
	}

	// finally, we apply the system labels, which override all labels.
	for k, v := range system {
		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
	}
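	// e.g. the system key "task.id" ends up on the container as "com.docker.swarm.task.id".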

	return labels
}

func (c *containerConfig) bindMounts() []string {
	var r []string

	for _, val := range c.spec().Mounts {
		mask := getMountMask(&val)
		if val.Type == api.MountTypeBind {
			r = append(r, fmt.Sprintf("%s:%s:%s", val.Source, val.Target, mask))
		}
	}

	return r
}

// getMountMask returns the comma-separated option string ("rw"/"ro" plus any
// propagation and volume flags) for the given mount.
func getMountMask(m *api.Mount) string {
	maskOpts := []string{"ro"}
	if m.Writable {
		maskOpts[0] = "rw"
	}

	if m.BindOptions != nil {
		switch m.BindOptions.Propagation {
		case api.MountPropagationPrivate:
			maskOpts = append(maskOpts, "private")
		case api.MountPropagationRPrivate:
			maskOpts = append(maskOpts, "rprivate")
		case api.MountPropagationShared:
			maskOpts = append(maskOpts, "shared")
		case api.MountPropagationRShared:
			maskOpts = append(maskOpts, "rshared")
		case api.MountPropagationSlave:
			maskOpts = append(maskOpts, "slave")
		case api.MountPropagationRSlave:
			maskOpts = append(maskOpts, "rslave")
		}
	}

	if m.VolumeOptions != nil {
		if !m.VolumeOptions.Populate {
			maskOpts = append(maskOpts, "nocopy")
		}
	}
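	// Illustrative results (hypothetical mounts): a writable bind mount with
	// rshared propagation yields "rw,rshared"; a read-only volume with
	// Populate disabled yields "ro,nocopy".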
	return strings.Join(maskOpts, ",")
}

func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
	return &enginecontainer.HostConfig{
		Resources: c.resources(),
		Binds:     c.bindMounts(),
	}
}

// volumeCreateRequest handles the case of volumes that are defined inside a service Mount.
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest {
	var (
		driverName string
		driverOpts map[string]string
		labels     map[string]string
	)

	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
		driverName = mount.VolumeOptions.DriverConfig.Name
		driverOpts = mount.VolumeOptions.DriverConfig.Options
		labels = mount.VolumeOptions.Labels
	}

	if mount.VolumeOptions != nil {
		return &types.VolumeCreateRequest{
			Name:       mount.Source,
			Driver:     driverName,
			DriverOpts: driverOpts,
			Labels:     labels,
		}
	}
	return nil
}

func (c *containerConfig) resources() enginecontainer.Resources {
	resources := enginecontainer.Resources{}

	// If no limits are specified, let the engine use its defaults.
	//
	// TODO(aluzzardi): We might want to set some limits anyway; otherwise
	// "unlimited" tasks will step over the reservation of other tasks.
	r := c.task.Spec.Resources
	if r == nil || r.Limits == nil {
		return resources
	}

	if r.Limits.MemoryBytes > 0 {
		resources.Memory = r.Limits.MemoryBytes
	}

	if r.Limits.NanoCPUs > 0 {
		// CPU Period must be set in microseconds.
		resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
		resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
	}
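	// Worked example (hypothetical limit): 1.5 CPUs arrives as NanoCPUs =
	// 1.5e9, giving CPUPeriod = 100000µs and
	// CPUQuota = 1.5e9 * 100000 / 1e9 = 150000µs.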

	return resources
}

// createNetworkingConfig builds the networking config used at container
// create time; the Docker daemon supports just one network during create.
func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil {
		networks = c.task.Networks
	}

	epConfig := make(map[string]*network.EndpointSettings)
	if len(networks) > 0 {
		epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0])
	}

	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

// TODO: Merge this function with createNetworkingConfig after the daemon supports multiple networks in container create.
func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil {
		networks = c.task.Networks
	}

	// The first network is used during container create; the remaining networks are attached with "docker network connect".
	if len(networks) < 2 {
		return nil
	}

	epConfig := make(map[string]*network.EndpointSettings)
	for _, na := range networks[1:] {
		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na)
	}
	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

// getEndpointConfig derives the endpoint's IPv4 and IPv6 addresses from a
// network attachment.
func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings {
	var ipv4, ipv6 string
	for _, addr := range na.Addresses {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue
		}

		if ip.To4() != nil {
			ipv4 = ip.String()
			continue
		}

		if ip.To16() != nil {
			ipv6 = ip.String()
		}
	}
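	// e.g. an attachment address of "10.0.0.3/24" (hypothetical) yields IPv4Address "10.0.0.3".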

	return &network.EndpointSettings{
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: ipv4,
			IPv6Address: ipv6,
		},
	}
}

func (c *containerConfig) virtualIP(networkID string) string {
	if c.task.Endpoint == nil {
		return ""
	}

	for _, eVip := range c.task.Endpoint.VirtualIPs {
		// We only support IPv4 VIPs for now.
		if eVip.NetworkID == networkID {
			vip, _, err := net.ParseCIDR(eVip.Addr)
			if err != nil {
				return ""
			}

			return vip.String()
		}
	}

	return ""
}

func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
	if len(c.task.Networks) == 0 {
		return nil
	}

	logrus.Debugf("Creating service config in agent for t = %+v", c.task)
	svcCfg := &clustertypes.ServiceConfig{
		Name:             c.task.ServiceAnnotations.Name,
		Aliases:          make(map[string][]string),
		ID:               c.task.ServiceID,
		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
	}

	for _, na := range c.task.Networks {
		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
			// We support only IPv4 virtual IP for now.
			IPv4: c.virtualIP(na.Network.ID),
		}
		if len(na.Aliases) > 0 {
			svcCfg.Aliases[na.Network.ID] = na.Aliases
		}
	}

	if c.task.Endpoint != nil {
		for _, ePort := range c.task.Endpoint.Ports {
			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
				Name:          ePort.Name,
				Protocol:      int32(ePort.Protocol),
				TargetPort:    ePort.TargetPort,
				PublishedPort: ePort.PublishedPort,
			})
		}
	}

	return svcCfg
}

// networks returns a list of network names attached to the container. The
// returned names can be used to look up the corresponding network create
// options.
func (c *containerConfig) networks() []string {
	var networks []string

	for name := range c.networksAttachments {
		networks = append(networks, name)
	}

	return networks
}

func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
	na, ok := c.networksAttachments[name]
	if !ok {
		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
	}

	options := types.NetworkCreate{
		// ID:     na.Network.ID,
		Driver: na.Network.DriverState.Name,
		IPAM: network.IPAM{
			Driver: na.Network.IPAM.Driver.Name,
		},
		Options:        na.Network.DriverState.Options,
		CheckDuplicate: true,
	}

	for _, ic := range na.Network.IPAM.Configs {
		c := network.IPAMConfig{
			Subnet:  ic.Subnet,
			IPRange: ic.Range,
			Gateway: ic.Gateway,
		}
		options.IPAM.Config = append(options.IPAM.Config, c)
	}

	return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
}