github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/client/driver/docker.go

     1  package driver
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net"
     9  	"os"
    10  	"os/exec"
    11  	"path/filepath"
    12  	"runtime"
    13  	"strconv"
    14  	"strings"
    15  	"sync"
    16  	"syscall"
    17  	"time"
    18  
    19  	"github.com/armon/circbuf"
    20  	docker "github.com/fsouza/go-dockerclient"
    21  
    22  	"github.com/docker/docker/cli/config/configfile"
    23  	"github.com/docker/docker/reference"
    24  	"github.com/docker/docker/registry"
    25  
    26  	"github.com/hashicorp/go-multierror"
    27  	"github.com/hashicorp/go-plugin"
    28  	"github.com/hashicorp/nomad/client/allocdir"
    29  	"github.com/hashicorp/nomad/client/config"
    30  	"github.com/hashicorp/nomad/client/driver/env"
    31  	"github.com/hashicorp/nomad/client/driver/executor"
    32  	dstructs "github.com/hashicorp/nomad/client/driver/structs"
    33  	cstructs "github.com/hashicorp/nomad/client/structs"
    34  	"github.com/hashicorp/nomad/helper"
    35  	"github.com/hashicorp/nomad/helper/fields"
    36  	shelpers "github.com/hashicorp/nomad/helper/stats"
    37  	"github.com/hashicorp/nomad/nomad/structs"
    38  	"github.com/mitchellh/mapstructure"
    39  )
    40  
    41  var (
    42  	// We store the clients globally to cache the connection to the docker daemon.
    43  	createClients sync.Once
    44  
    45  	// client is a docker client with a timeout of dockerTimeout (5 minutes).
    46  	// It is used for all operations against the docker daemon that are not
    47  	// long running, such as creating and killing containers.
    48  	client *docker.Client
    49  
    50  	// waitClient is a docker client with no timeouts. This is used for long
    51  	// running operations such as waiting on containers and collecting stats.
    52  	waitClient *docker.Client
    53  
    54  	// The statistics the Docker driver exposes
    55  	DockerMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage"}
    56  	DockerMeasuredCpuStats = []string{"Throttled Periods", "Throttled Time", "Percent"}
    57  
    58  	// recoverableErrTimeouts returns a recoverable error if the error was due
    59  	// to timeouts
    60  	recoverableErrTimeouts = func(err error) error {
    61  		r := false
    62  		if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") ||
    63  			strings.Contains(err.Error(), "EOF") {
    64  			r = true
    65  		}
    66  		return structs.NewRecoverableError(err, r)
    67  	}
    68  )
    69  
    70  const (
    71  	// NoSuchContainerError is returned by the docker daemon if the container
    72  	// does not exist.
    73  	NoSuchContainerError = "No such container"
    74  
    75  	// The key populated in Node Attributes to indicate presence of the Docker
    76  	// driver
    77  	dockerDriverAttr = "driver.docker"
    78  
    79  	// dockerSELinuxLabelConfigOption is the key for configuring the
    80  	// SELinux label for binds.
    81  	dockerSELinuxLabelConfigOption = "docker.volumes.selinuxlabel"
    82  
    83  	// dockerVolumesConfigOption is the key for enabling the use of custom
    84  	// bind volumes to arbitrary host paths.
    85  	dockerVolumesConfigOption  = "docker.volumes.enabled"
    86  	dockerVolumesConfigDefault = true
    87  
    88  	// dockerPrivilegedConfigOption is the key for running containers in
    89  	// Docker's privileged mode.
    90  	dockerPrivilegedConfigOption = "docker.privileged.enabled"
    91  
    92  	// dockerCleanupImageConfigOption is the key for whether or not to
    93  	// cleanup images after the task exits.
    94  	dockerCleanupImageConfigOption  = "docker.cleanup.image"
    95  	dockerCleanupImageConfigDefault = true
    96  
    97  	// dockerImageRemoveDelayConfigOption is the key for the delay to wait
    98  	// before removing an image once the task that used it exits
    99  	dockerImageRemoveDelayConfigOption  = "docker.cleanup.image.delay"
   100  	dockerImageRemoveDelayConfigDefault = 3 * time.Minute
   101  
   102  	// dockerTimeout is the length of time a request can be outstanding before
   103  	// it is timed out.
   104  	dockerTimeout = 5 * time.Minute
   105  
   106  	// dockerImageResKey is the CreatedResources key for docker images
   107  	dockerImageResKey = "image"
   108  
   109  	// dockerAuthHelperPrefix is the prefix to attach to the credential helper
   110  	// and should be found in the $PATH. Example: ${prefix-}${helper-name}
   111  	dockerAuthHelperPrefix = "docker-credential-"
   112  )
   113  
   114  type DockerDriver struct {
   115  	DriverContext
   116  
   117  	driverConfig *DockerDriverConfig
   118  	imageID      string
   119  
   120  	// A tri-state boolean: nil if fingerprinting has not run yet, otherwise
   121  	// whether the most recent fingerprint succeeded
   122  	fingerprintSuccess *bool
   123  }
   124  
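        // DockerDriverAuth holds registry credentials used when pulling images
        // from a private Docker registry.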
   125  type DockerDriverAuth struct {
   126  	Username      string `mapstructure:"username"`       // username for the registry
   127  	Password      string `mapstructure:"password"`       // password to access the registry
   128  	Email         string `mapstructure:"email"`          // email address of the user who is allowed to access the registry
   129  	ServerAddress string `mapstructure:"server_address"` // server address of the registry
   130  }
   131  
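        // DockerLoggingOpts configures the Docker logging driver and its options
        // for the container.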
   132  type DockerLoggingOpts struct {
   133  	Type      string              `mapstructure:"type"`
   134  	ConfigRaw []map[string]string `mapstructure:"config"`
   135  	Config    map[string]string   `mapstructure:"-"`
   136  }
   137  
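        // DockerMount is a volume mount from the jobspec's mounts stanza.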
   138  type DockerMount struct {
   139  	Target        string                 `mapstructure:"target"`
   140  	Source        string                 `mapstructure:"source"`
   141  	ReadOnly      bool                   `mapstructure:"readonly"`
   142  	VolumeOptions []*DockerVolumeOptions `mapstructure:"volume_options"`
   143  }
   144  
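        // DockerDevice exposes a host device to the container.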
   145  type DockerDevice struct {
   146  	HostPath          string `mapstructure:"host_path"`
   147  	ContainerPath     string `mapstructure:"container_path"`
   148  	CgroupPermissions string `mapstructure:"cgroup_permissions"`
   149  }
   150  
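        // DockerVolumeOptions holds the options for a mounted volume.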
   151  type DockerVolumeOptions struct {
   152  	NoCopy       bool                       `mapstructure:"no_copy"`
   153  	Labels       []map[string]string        `mapstructure:"labels"`
   154  	DriverConfig []DockerVolumeDriverConfig `mapstructure:"driver_config"`
   155  }
   156  
   157  // DockerVolumeDriverConfig holds a map of volume driver specific options
   158  type DockerVolumeDriverConfig struct {
   159  	Name    string              `mapstructure:"name"`
   160  	Options []map[string]string `mapstructure:"options"`
   161  }
   162  
   163  // DockerDriverConfig defines the user specified config block in a jobspec
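        //
        // A minimal example stanza (illustrative only):
        //
        //	config {
        //	  image = "redis:3.2"
        //	  port_map {
        //	    db = 6379
        //	  }
        //	}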
   164  type DockerDriverConfig struct {
   165  	ImageName        string              `mapstructure:"image"`              // Container's Image Name
   166  	LoadImage        string              `mapstructure:"load"`               // LoadImage is a path to an image archive file
   167  	Command          string              `mapstructure:"command"`            // The Command to run when the container starts up
   168  	Args             []string            `mapstructure:"args"`               // The arguments to the Command
   169  	IpcMode          string              `mapstructure:"ipc_mode"`           // The IPC mode of the container - host and none
   170  	NetworkMode      string              `mapstructure:"network_mode"`       // The network mode of the container - host, nat and none
   171  	NetworkAliases   []string            `mapstructure:"network_aliases"`    // The network-scoped alias for the container
   172  	IPv4Address      string              `mapstructure:"ipv4_address"`       // The container ipv4 address
   173  	IPv6Address      string              `mapstructure:"ipv6_address"`       // the container ipv6 address
   174  	PidMode          string              `mapstructure:"pid_mode"`           // The PID mode of the container - host and none
   175  	UTSMode          string              `mapstructure:"uts_mode"`           // The UTS mode of the container - host and none
   176  	UsernsMode       string              `mapstructure:"userns_mode"`        // The User namespace mode of the container - host and none
   177  	PortMapRaw       []map[string]string `mapstructure:"port_map"`           //
   178  	PortMap          map[string]int      `mapstructure:"-"`                  // A map of host port labels and the ports exposed on the container
   179  	Privileged       bool                `mapstructure:"privileged"`         // Flag to run the container in privileged mode
   180  	SysctlRaw        []map[string]string `mapstructure:"sysctl"`             //
   181  	Sysctl           map[string]string   `mapstructure:"-"`                  // The sysctl custom configurations
   182  	UlimitRaw        []map[string]string `mapstructure:"ulimit"`             //
   183  	Ulimit           []docker.ULimit     `mapstructure:"-"`                  // The ulimit custom configurations
   184  	DNSServers       []string            `mapstructure:"dns_servers"`        // DNS Server for containers
   185  	DNSSearchDomains []string            `mapstructure:"dns_search_domains"` // DNS Search domains for containers
   186  	DNSOptions       []string            `mapstructure:"dns_options"`        // DNS Options
   187  	ExtraHosts       []string            `mapstructure:"extra_hosts"`        // Add host to /etc/hosts (host:IP)
   188  	Hostname         string              `mapstructure:"hostname"`           // Hostname for containers
   189  	LabelsRaw        []map[string]string `mapstructure:"labels"`             //
   190  	Labels           map[string]string   `mapstructure:"-"`                  // Labels to set when the container starts up
   191  	Auth             []DockerDriverAuth  `mapstructure:"auth"`               // Authentication credentials for a private Docker registry
   192  	AuthSoftFail     bool                `mapstructure:"auth_soft_fail"`     // Soft-fail if auth creds are provided but fail
   193  	TTY              bool                `mapstructure:"tty"`                // Allocate a Pseudo-TTY
   194  	Interactive      bool                `mapstructure:"interactive"`        // Keep STDIN open even if not attached
   195  	ShmSize          int64               `mapstructure:"shm_size"`           // Size of /dev/shm of the container in bytes
   196  	WorkDir          string              `mapstructure:"work_dir"`           // Working directory inside the container
   197  	Logging          []DockerLoggingOpts `mapstructure:"logging"`            // Logging options for syslog server
   198  	Volumes          []string            `mapstructure:"volumes"`            // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
   199  	Mounts           []DockerMount       `mapstructure:"mounts"`             // Docker volumes to mount
   200  	VolumeDriver     string              `mapstructure:"volume_driver"`      // Docker volume driver used for the container's volumes
   201  	ForcePull        bool                `mapstructure:"force_pull"`         // Always force pull before running image, useful if your tags are mutable
   202  	MacAddress       string              `mapstructure:"mac_address"`        // Pin mac address to container
   203  	SecurityOpt      []string            `mapstructure:"security_opt"`       // Flags to pass directly to security-opt
   204  	Devices          []DockerDevice      `mapstructure:"devices"`            // To allow mounting USB or other serial control devices
   205  }
   206  
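        // sliceMergeUlimit converts a merged map of ulimit names to "soft[:hard]"
        // strings into docker.ULimit values. When no hard limit is given, the soft
        // limit is used for both. For example (illustrative), {"nofile": "2048:4096"}
        // yields docker.ULimit{Name: "nofile", Soft: 2048, Hard: 4096}.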
   207  func sliceMergeUlimit(ulimitsRaw map[string]string) ([]docker.ULimit, error) {
   208  	var ulimits []docker.ULimit
   209  
   210  	for name, ulimitRaw := range ulimitsRaw {
   211  		if len(ulimitRaw) == 0 {
   212  			return []docker.ULimit{}, fmt.Errorf("Malformed ulimit specification %v: %q, cannot be empty", name, ulimitRaw)
   213  		}
   214  		// hard limit is optional
   215  		if !strings.Contains(ulimitRaw, ":") {
   216  			ulimitRaw = ulimitRaw + ":" + ulimitRaw
   217  		}
   218  
   219  		splitted := strings.SplitN(ulimitRaw, ":", 2)
   220  		if len(splitted) < 2 {
   221  			return []docker.ULimit{}, fmt.Errorf("Malformed ulimit specification %v: %v", name, ulimitRaw)
   222  		}
   223  		soft, err := strconv.Atoi(splitted[0])
   224  		if err != nil {
   225  			return []docker.ULimit{}, fmt.Errorf("Malformed soft ulimit %v: %v", name, ulimitRaw)
   226  		}
   227  		hard, err := strconv.Atoi(splitted[1])
   228  		if err != nil {
   229  			return []docker.ULimit{}, fmt.Errorf("Malformed hard ulimit %v: %v", name, ulimitRaw)
   230  		}
   231  
   232  		ulimit := docker.ULimit{
   233  			Name: name,
   234  			Soft: int64(soft),
   235  			Hard: int64(hard),
   236  		}
   237  		ulimits = append(ulimits, ulimit)
   238  	}
   239  	return ulimits, nil
   240  }
   241  
   242  // Validate validates a docker driver config
   243  func (c *DockerDriverConfig) Validate() error {
   244  	if c.ImageName == "" {
   245  		return fmt.Errorf("Docker Driver needs an image name")
   246  	}
   247  	if len(c.Devices) > 0 {
   248  		for _, dev := range c.Devices {
   249  			if dev.HostPath == "" {
   250  				return fmt.Errorf("host path must be set in configuration for devices")
   251  			}
   252  			if dev.CgroupPermissions != "" {
   253  				for _, c := range dev.CgroupPermissions {
   254  					ch := string(c)
   255  					if ch != "r" && ch != "w" && ch != "m" {
   256  						return fmt.Errorf("invalid cgroup permission string: %q", dev.CgroupPermissions)
   257  					}
   258  				}
   259  			}
   260  		}
   261  	}
   262  	c.Sysctl = mapMergeStrStr(c.SysctlRaw...)
   263  	c.Labels = mapMergeStrStr(c.LabelsRaw...)
   264  	if len(c.Logging) > 0 {
   265  		c.Logging[0].Config = mapMergeStrStr(c.Logging[0].ConfigRaw...)
   266  	}
   267  
   268  	mergedUlimitsRaw := mapMergeStrStr(c.UlimitRaw...)
   269  	ulimit, err := sliceMergeUlimit(mergedUlimitsRaw)
   270  	if err != nil {
   271  		return err
   272  	}
   273  	c.Ulimit = ulimit
   274  	return nil
   275  }
   276  
   277  // NewDockerDriverConfig returns a docker driver config by parsing the HCL
   278  // config
   279  func NewDockerDriverConfig(task *structs.Task, env *env.TaskEnv) (*DockerDriverConfig, error) {
   280  	var dconf DockerDriverConfig
   281  
   282  	if err := mapstructure.WeakDecode(task.Config, &dconf); err != nil {
   283  		return nil, err
   284  	}
   285  
   286  	// Interpolate everything that is a string
   287  	dconf.ImageName = env.ReplaceEnv(dconf.ImageName)
   288  	dconf.Command = env.ReplaceEnv(dconf.Command)
   289  	dconf.IpcMode = env.ReplaceEnv(dconf.IpcMode)
   290  	dconf.NetworkMode = env.ReplaceEnv(dconf.NetworkMode)
   291  	dconf.NetworkAliases = env.ParseAndReplace(dconf.NetworkAliases)
   292  	dconf.IPv4Address = env.ReplaceEnv(dconf.IPv4Address)
   293  	dconf.IPv6Address = env.ReplaceEnv(dconf.IPv6Address)
   294  	dconf.PidMode = env.ReplaceEnv(dconf.PidMode)
   295  	dconf.UTSMode = env.ReplaceEnv(dconf.UTSMode)
   296  	dconf.Hostname = env.ReplaceEnv(dconf.Hostname)
   297  	dconf.WorkDir = env.ReplaceEnv(dconf.WorkDir)
   298  	dconf.LoadImage = env.ReplaceEnv(dconf.LoadImage)
   299  	dconf.Volumes = env.ParseAndReplace(dconf.Volumes)
   300  	dconf.VolumeDriver = env.ReplaceEnv(dconf.VolumeDriver)
   301  	dconf.DNSServers = env.ParseAndReplace(dconf.DNSServers)
   302  	dconf.DNSSearchDomains = env.ParseAndReplace(dconf.DNSSearchDomains)
   303  	dconf.DNSOptions = env.ParseAndReplace(dconf.DNSOptions)
   304  	dconf.ExtraHosts = env.ParseAndReplace(dconf.ExtraHosts)
   305  	dconf.MacAddress = env.ReplaceEnv(dconf.MacAddress)
   306  	dconf.SecurityOpt = env.ParseAndReplace(dconf.SecurityOpt)
   307  
   308  	for _, m := range dconf.SysctlRaw {
   309  		for k, v := range m {
   310  			delete(m, k)
   311  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   312  		}
   313  	}
   314  
   315  	for _, m := range dconf.UlimitRaw {
   316  		for k, v := range m {
   317  			delete(m, k)
   318  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   319  		}
   320  	}
   321  
   322  	for _, m := range dconf.LabelsRaw {
   323  		for k, v := range m {
   324  			delete(m, k)
   325  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   326  		}
   327  	}
   328  	dconf.Labels = mapMergeStrStr(dconf.LabelsRaw...)
   329  
   330  	for i, a := range dconf.Auth {
   331  		dconf.Auth[i].Username = env.ReplaceEnv(a.Username)
   332  		dconf.Auth[i].Password = env.ReplaceEnv(a.Password)
   333  		dconf.Auth[i].Email = env.ReplaceEnv(a.Email)
   334  		dconf.Auth[i].ServerAddress = env.ReplaceEnv(a.ServerAddress)
   335  	}
   336  
   337  	for i, l := range dconf.Logging {
   338  		dconf.Logging[i].Type = env.ReplaceEnv(l.Type)
   339  		for _, c := range l.ConfigRaw {
   340  			for k, v := range c {
   341  				delete(c, k)
   342  				c[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   343  			}
   344  		}
   345  	}
   346  
   347  	for i, m := range dconf.Mounts {
   348  		dconf.Mounts[i].Target = env.ReplaceEnv(m.Target)
   349  		dconf.Mounts[i].Source = env.ReplaceEnv(m.Source)
   350  
   351  		if len(m.VolumeOptions) > 1 {
   352  			return nil, fmt.Errorf("Only one volume_options stanza allowed")
   353  		}
   354  
   355  		if len(m.VolumeOptions) == 1 {
   356  			vo := m.VolumeOptions[0]
   357  			if len(vo.Labels) > 1 {
   358  				return nil, fmt.Errorf("labels may only be specified once in volume_options stanza")
   359  			}
   360  
   361  			if len(vo.Labels) == 1 {
   362  				for k, v := range vo.Labels[0] {
   363  					if k != env.ReplaceEnv(k) {
   364  						delete(vo.Labels[0], k)
   365  					}
   366  					vo.Labels[0][env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   367  				}
   368  			}
   369  
   370  			if len(vo.DriverConfig) > 1 {
   371  				return nil, fmt.Errorf("volume driver config may only be specified once")
   372  			}
   373  			if len(vo.DriverConfig) == 1 {
   374  				vo.DriverConfig[0].Name = env.ReplaceEnv(vo.DriverConfig[0].Name)
   375  				if len(vo.DriverConfig[0].Options) > 1 {
   376  					return nil, fmt.Errorf("volume driver options may only be specified once")
   377  				}
   378  
   379  				if len(vo.DriverConfig[0].Options) == 1 {
   380  					options := vo.DriverConfig[0].Options[0]
   381  					for k, v := range options {
   382  						if k != env.ReplaceEnv(k) {
   383  							delete(options, k)
   384  						}
   385  						options[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   386  					}
   387  				}
   388  			}
   389  		}
   390  	}
   391  
   392  	if len(dconf.Logging) > 0 {
   393  		dconf.Logging[0].Config = mapMergeStrStr(dconf.Logging[0].ConfigRaw...)
   394  	}
   395  
   396  	portMap := make(map[string]int)
   397  	for _, m := range dconf.PortMapRaw {
   398  		for k, v := range m {
   399  			ki, vi := env.ReplaceEnv(k), env.ReplaceEnv(v)
   400  			p, err := strconv.Atoi(vi)
   401  			if err != nil {
   402  				return nil, fmt.Errorf("failed to parse port map value %v to %v: %v", ki, vi, err)
   403  			}
   404  			portMap[ki] = p
   405  		}
   406  	}
   407  	dconf.PortMap = portMap
   408  
   409  	// Strip any https:// prefix from the image name
   410  	if strings.Contains(dconf.ImageName, "https://") {
   411  		dconf.ImageName = strings.Replace(dconf.ImageName, "https://", "", 1)
   412  	}
   413  
   414  	// If devices are configured, set default cgroup permissions
   415  	if len(dconf.Devices) > 0 {
   416  		for i, dev := range dconf.Devices {
   417  			if dev.CgroupPermissions == "" {
   418  				dev.CgroupPermissions = "rwm"
   419  			}
   420  			dconf.Devices[i] = dev
   421  		}
   422  	}
   423  
   424  	if err := dconf.Validate(); err != nil {
   425  		return nil, err
   426  	}
   427  	return &dconf, nil
   428  }
   429  
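        // dockerPID holds the state needed to reattach to a running container and
        // its executor plugin (for example after a Nomad client restart).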
   430  type dockerPID struct {
   431  	Version        string
   432  	Image          string
   433  	ImageID        string
   434  	ContainerID    string
   435  	KillTimeout    time.Duration
   436  	MaxKillTimeout time.Duration
   437  	PluginConfig   *PluginReattachConfig
   438  }
   439  
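        // DockerHandle is the driver handle returned by Start; it tracks the
        // running container, its executor plugin, and collected resource usage.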
   440  type DockerHandle struct {
   441  	pluginClient      *plugin.Client
   442  	executor          executor.Executor
   443  	client            *docker.Client
   444  	waitClient        *docker.Client
   445  	logger            *log.Logger
   446  	Image             string
   447  	ImageID           string
   448  	containerID       string
   449  	version           string
   450  	killTimeout       time.Duration
   451  	maxKillTimeout    time.Duration
   452  	resourceUsageLock sync.RWMutex
   453  	resourceUsage     *cstructs.TaskResourceUsage
   454  	waitCh            chan *dstructs.WaitResult
   455  	doneCh            chan bool
   456  }
   457  
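        // NewDockerDriver returns a DockerDriver wrapping the given DriverContext.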
   458  func NewDockerDriver(ctx *DriverContext) Driver {
   459  	return &DockerDriver{DriverContext: *ctx}
   460  }
   461  
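        // Fingerprint probes for a usable Docker daemon and, when one is found,
        // populates node attributes such as driver.docker, the daemon version,
        // volume and privileged support, and the bridge network IP.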
   462  func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
   463  	// Initialize docker API clients
   464  	client, _, err := d.dockerClients()
   465  	if err != nil {
   466  		if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
   467  			d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s", err)
   468  		}
   469  		delete(node.Attributes, dockerDriverAttr)
   470  		d.fingerprintSuccess = helper.BoolToPtr(false)
   471  		return false, nil
   472  	}
   473  
   474  	// This is the first operation taken on the client so we'll try to
   475  	// establish a connection to the Docker daemon. If this fails it means
   476  	// Docker isn't available so we'll simply disable the docker driver.
   477  	env, err := client.Version()
   478  	if err != nil {
   479  		delete(node.Attributes, dockerDriverAttr)
   480  		if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
   481  			d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon at %s: %s", client.Endpoint(), err)
   482  		}
   483  		d.fingerprintSuccess = helper.BoolToPtr(false)
   484  		return false, nil
   485  	}
   486  
   487  	node.Attributes[dockerDriverAttr] = "1"
   488  	node.Attributes["driver.docker.version"] = env.Get("Version")
   489  
   490  	privileged := d.config.ReadBoolDefault(dockerPrivilegedConfigOption, false)
   491  	if privileged {
   492  		node.Attributes[dockerPrivilegedConfigOption] = "1"
   493  	}
   494  
   495  	// Advertise if this node supports Docker volumes
   496  	if d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault) {
   497  		node.Attributes["driver."+dockerVolumesConfigOption] = "1"
   498  	}
   499  
   500  	// Detect bridge IP address - #2785
   501  	if nets, err := client.ListNetworks(); err != nil {
   502  		d.logger.Printf("[WARN] driver.docker: error discovering bridge IP: %v", err)
   503  	} else {
   504  		for _, n := range nets {
   505  			if n.Name != "bridge" {
   506  				continue
   507  			}
   508  
   509  			if len(n.IPAM.Config) == 0 {
   510  				d.logger.Printf("[WARN] driver.docker: no IPAM config for bridge network")
   511  				break
   512  			}
   513  
   514  			if n.IPAM.Config[0].Gateway != "" {
   515  				node.Attributes["driver.docker.bridge_ip"] = n.IPAM.Config[0].Gateway
   516  			} else if d.fingerprintSuccess == nil {
   517  				// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
   518  				// See https://github.com/moby/moby/issues/32648
   519  				d.logger.Printf("[DEBUG] driver.docker: bridge_ip could not be discovered")
   520  			}
   521  			break
   522  		}
   523  	}
   524  
   525  	d.fingerprintSuccess = helper.BoolToPtr(true)
   526  	return true, nil
   527  }
   528  
   529  // Validate is used to validate the driver configuration
   530  func (d *DockerDriver) Validate(config map[string]interface{}) error {
   531  	fd := &fields.FieldData{
   532  		Raw: config,
   533  		Schema: map[string]*fields.FieldSchema{
   534  			"image": {
   535  				Type:     fields.TypeString,
   536  				Required: true,
   537  			},
   538  			"load": {
   539  				Type: fields.TypeString,
   540  			},
   541  			"command": {
   542  				Type: fields.TypeString,
   543  			},
   544  			"args": {
   545  				Type: fields.TypeArray,
   546  			},
   547  			"ipc_mode": {
   548  				Type: fields.TypeString,
   549  			},
   550  			"network_mode": {
   551  				Type: fields.TypeString,
   552  			},
   553  			"network_aliases": {
   554  				Type: fields.TypeArray,
   555  			},
   556  			"ipv4_address": {
   557  				Type: fields.TypeString,
   558  			},
   559  			"ipv6_address": {
   560  				Type: fields.TypeString,
   561  			},
   562  			"mac_address": {
   563  				Type: fields.TypeString,
   564  			},
   565  			"pid_mode": {
   566  				Type: fields.TypeString,
   567  			},
   568  			"uts_mode": {
   569  				Type: fields.TypeString,
   570  			},
   571  			"userns_mode": {
   572  				Type: fields.TypeString,
   573  			},
   574  			"sysctl": {
   575  				Type: fields.TypeArray,
   576  			},
   577  			"ulimit": {
   578  				Type: fields.TypeArray,
   579  			},
   580  			"port_map": {
   581  				Type: fields.TypeArray,
   582  			},
   583  			"privileged": {
   584  				Type: fields.TypeBool,
   585  			},
   586  			"dns_servers": {
   587  				Type: fields.TypeArray,
   588  			},
   589  			"dns_options": {
   590  				Type: fields.TypeArray,
   591  			},
   592  			"dns_search_domains": {
   593  				Type: fields.TypeArray,
   594  			},
   595  			"extra_hosts": {
   596  				Type: fields.TypeArray,
   597  			},
   598  			"hostname": {
   599  				Type: fields.TypeString,
   600  			},
   601  			"labels": {
   602  				Type: fields.TypeArray,
   603  			},
   604  			"auth": {
   605  				Type: fields.TypeArray,
   606  			},
   607  			"auth_soft_fail": {
   608  				Type: fields.TypeBool,
   609  			},
   610  			// COMPAT: Remove in 0.6.0. SSL is no longer needed
   611  			"ssl": {
   612  				Type: fields.TypeBool,
   613  			},
   614  			"tty": {
   615  				Type: fields.TypeBool,
   616  			},
   617  			"interactive": {
   618  				Type: fields.TypeBool,
   619  			},
   620  			"shm_size": {
   621  				Type: fields.TypeInt,
   622  			},
   623  			"work_dir": {
   624  				Type: fields.TypeString,
   625  			},
   626  			"logging": {
   627  				Type: fields.TypeArray,
   628  			},
   629  			"volumes": {
   630  				Type: fields.TypeArray,
   631  			},
   632  			"volume_driver": {
   633  				Type: fields.TypeString,
   634  			},
   635  			"mounts": {
   636  				Type: fields.TypeArray,
   637  			},
   638  			"force_pull": {
   639  				Type: fields.TypeBool,
   640  			},
   641  			"security_opt": {
   642  				Type: fields.TypeArray,
   643  			},
   644  			"devices": {
   645  				Type: fields.TypeArray,
   646  			},
   647  		},
   648  	}
   649  
   650  	if err := fd.Validate(); err != nil {
   651  		return err
   652  	}
   653  
   654  	return nil
   655  }
   656  
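        // Abilities returns the capabilities of the docker driver: it can send
        // signals to and exec inside running tasks.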
   657  func (d *DockerDriver) Abilities() DriverAbilities {
   658  	return DriverAbilities{
   659  		SendSignals: true,
   660  		Exec:        true,
   661  	}
   662  }
   663  
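        // FSIsolation reports that docker tasks are filesystem-isolated by image.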
   664  func (d *DockerDriver) FSIsolation() cstructs.FSIsolation {
   665  	return cstructs.FSIsolationImage
   666  }
   667  
   668  // getDockerCoordinator returns the docker coordinator and the caller ID to use when
   669  // interacting with the coordinator
   670  func (d *DockerDriver) getDockerCoordinator(client *docker.Client) (*dockerCoordinator, string) {
   671  	config := &dockerCoordinatorConfig{
   672  		client:      client,
   673  		cleanup:     d.config.ReadBoolDefault(dockerCleanupImageConfigOption, dockerCleanupImageConfigDefault),
   674  		logger:      d.logger,
   675  		removeDelay: d.config.ReadDurationDefault(dockerImageRemoveDelayConfigOption, dockerImageRemoveDelayConfigDefault),
   676  	}
   677  
   678  	return GetDockerCoordinator(config), fmt.Sprintf("%s-%s", d.DriverContext.allocID, d.DriverContext.taskName)
   679  }
   680  
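        // Prestart parses and validates the task's docker config, ensures the image
        // is available (pulled from a registry or loaded from an archive), and
        // records it in CreatedResources so it can be cleaned up later.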
   681  func (d *DockerDriver) Prestart(ctx *ExecContext, task *structs.Task) (*PrestartResponse, error) {
   682  	driverConfig, err := NewDockerDriverConfig(task, ctx.TaskEnv)
   683  	if err != nil {
   684  		return nil, err
   685  	}
   686  
   687  	// Set state needed by Start
   688  	d.driverConfig = driverConfig
   689  
   690  	// Initialize docker API clients
   691  	client, _, err := d.dockerClients()
   692  	if err != nil {
   693  		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
   694  	}
   695  
   696  	// Ensure the image is available
   697  	id, err := d.createImage(driverConfig, client, ctx.TaskDir)
   698  	if err != nil {
   699  		return nil, err
   700  	}
   701  	d.imageID = id
   702  
   703  	resp := NewPrestartResponse()
   704  	resp.CreatedResources.Add(dockerImageResKey, id)
   705  
   706  	// Return the PortMap if it's set
   707  	if len(driverConfig.PortMap) > 0 {
   708  		resp.Network = &cstructs.DriverNetwork{
   709  			PortMap: driverConfig.PortMap,
   710  		}
   711  	}
   712  	return resp, nil
   713  }
   714  
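        // Start launches the executor plugin, optionally starts a syslog collector,
        // creates and starts the container, and returns a handle along with the
        // container's network information.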
   715  func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
   716  	pluginLogFile := filepath.Join(ctx.TaskDir.Dir, "executor.out")
   717  	executorConfig := &dstructs.ExecutorConfig{
   718  		LogFile:  pluginLogFile,
   719  		LogLevel: d.config.LogLevel,
   720  	}
   721  
   722  	exec, pluginClient, err := createExecutor(d.config.LogOutput, d.config, executorConfig)
   723  	if err != nil {
   724  		return nil, err
   725  	}
   726  	executorCtx := &executor.ExecutorContext{
   727  		TaskEnv:        ctx.TaskEnv,
   728  		Task:           task,
   729  		Driver:         "docker",
   730  		LogDir:         ctx.TaskDir.LogDir,
   731  		TaskDir:        ctx.TaskDir.Dir,
   732  		PortLowerBound: d.config.ClientMinPort,
   733  		PortUpperBound: d.config.ClientMaxPort,
   734  	}
   735  	if err := exec.SetContext(executorCtx); err != nil {
   736  		pluginClient.Kill()
   737  		return nil, fmt.Errorf("failed to set executor context: %v", err)
   738  	}
   739  
   740  	// If the user hasn't specified any logging options, launch our own syslog
   741  	// server if possible.
   742  	syslogAddr := ""
   743  	if len(d.driverConfig.Logging) == 0 {
   744  		if runtime.GOOS == "darwin" {
   745  			d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
   746  		} else {
   747  			ss, err := exec.LaunchSyslogServer()
   748  			if err != nil {
   749  				pluginClient.Kill()
   750  				return nil, fmt.Errorf("failed to start syslog collector: %v", err)
   751  			}
   752  			syslogAddr = ss.Addr
   753  		}
   754  	}
   755  
   756  	config, err := d.createContainerConfig(ctx, task, d.driverConfig, syslogAddr)
   757  	if err != nil {
   758  		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %q (%q): %v", d.driverConfig.ImageName, d.imageID, err)
   759  		pluginClient.Kill()
   760  		return nil, fmt.Errorf("Failed to create container configuration for image %q (%q): %v", d.driverConfig.ImageName, d.imageID, err)
   761  	}
   762  
   763  	container, err := d.createContainer(config)
   764  	if err != nil {
   765  		wrapped := fmt.Sprintf("Failed to create container: %v", err)
   766  		d.logger.Printf("[ERR] driver.docker: %s", wrapped)
   767  		pluginClient.Kill()
   768  		return nil, structs.WrapRecoverable(wrapped, err)
   769  	}
   770  
   771  	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)
   772  
   773  	// We don't need to start the container if it is already running, since
   774  	// createContainer may have returned a matching container that was already
   775  	// present on the host and running.
   776  	if !container.State.Running {
   777  		// Start the container
   778  		if err := d.startContainer(container); err != nil {
   779  			d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
   780  			pluginClient.Kill()
   781  			return nil, structs.NewRecoverableError(fmt.Errorf("Failed to start container %s: %s", container.ID, err), structs.IsRecoverable(err))
   782  		}
   783  
   784  		// Inspect the container to get all of its metadata, since much
   785  		// of it (e.g. networking) isn't populated until the container
   786  		// is started
   787  		runningContainer, err := client.InspectContainer(container.ID)
   788  		if err != nil {
   789  			err = fmt.Errorf("failed to inspect started container %s: %s", container.ID, err)
   790  			d.logger.Printf("[ERR] driver.docker: %v", err)
   791  			pluginClient.Kill()
   792  			return nil, structs.NewRecoverableError(err, true)
   793  		}
   794  		container = runningContainer
   795  		d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
   796  	} else {
   797  		d.logger.Printf("[DEBUG] driver.docker: re-attaching to container %s with status %q",
   798  			container.ID, container.State.String())
   799  	}
   800  
   801  	// Return a driver handle
   802  	maxKill := d.DriverContext.config.MaxKillTimeout
   803  	h := &DockerHandle{
   804  		client:         client,
   805  		waitClient:     waitClient,
   806  		executor:       exec,
   807  		pluginClient:   pluginClient,
   808  		logger:         d.logger,
   809  		Image:          d.driverConfig.ImageName,
   810  		ImageID:        d.imageID,
   811  		containerID:    container.ID,
   812  		version:        d.config.Version.VersionNumber(),
   813  		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
   814  		maxKillTimeout: maxKill,
   815  		doneCh:         make(chan bool),
   816  		waitCh:         make(chan *dstructs.WaitResult, 1),
   817  	}
   818  	go h.collectStats()
   819  	go h.run()
   820  
   821  	// Detect container address
   822  	ip, autoUse := d.detectIP(container)
   823  
   824  	// Create a response with the driver handle and container network metadata
   825  	resp := &StartResponse{
   826  		Handle: h,
   827  		Network: &cstructs.DriverNetwork{
   828  			PortMap:       d.driverConfig.PortMap,
   829  			IP:            ip,
   830  			AutoAdvertise: autoUse,
   831  		},
   832  	}
   833  	return resp, nil
   834  }
   835  
   836  // detectIP of Docker container. Returns the first IP found as well as true if
   837  // the IP should be advertised (bridge network IPs return false). Returns an
   838  // empty string and false if no IP could be found.
   839  func (d *DockerDriver) detectIP(c *docker.Container) (string, bool) {
   840  	if c.NetworkSettings == nil {
   841  		// This should only happen if there's been a coding error (such
   842  		// as not calling InspectContainer after CreateContainer). Code
   843  		// defensively in case the Docker API changes subtly.
   844  		d.logger.Printf("[ERROR] driver.docker: no network settings for container %s", c.ID)
   845  		return "", false
   846  	}
   847  
   848  	ip, ipName := "", ""
   849  	auto := false
   850  	for name, net := range c.NetworkSettings.Networks {
   851  		if net.IPAddress == "" {
   852  			// Ignore networks without an IP address
   853  			continue
   854  		}
   855  
   856  		ip = net.IPAddress
   857  		ipName = name
   858  
   859  		// Don't auto-advertise IPs for default networks (bridge on
   860  		// Linux, nat on Windows)
   861  		if name != "bridge" && name != "nat" {
   862  			auto = true
   863  		}
   864  
   865  		break
   866  	}
   867  
   868  	if n := len(c.NetworkSettings.Networks); n > 1 {
   869  		d.logger.Printf("[WARN] driver.docker: multiple (%d) Docker networks for container %q but Nomad only supports 1: choosing %q", n, c.ID, ipName)
   870  	}
   871  
   872  	return ip, auto
   873  }
   874  
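        // Cleanup removes resources created by Prestart (currently only docker
        // images), returning a recoverable error when a removal should be retried.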
   875  func (d *DockerDriver) Cleanup(_ *ExecContext, res *CreatedResources) error {
   876  	retry := false
   877  	var merr multierror.Error
   878  	for key, resources := range res.Resources {
   879  		switch key {
   880  		case dockerImageResKey:
   881  			for _, value := range resources {
   882  				err := d.cleanupImage(value)
   883  				if err != nil {
   884  					if structs.IsRecoverable(err) {
   885  						retry = true
   886  					}
   887  					merr.Errors = append(merr.Errors, err)
   888  					continue
   889  				}
   890  
   891  				// Remove cleaned image from resources
   892  				res.Remove(dockerImageResKey, value)
   893  			}
   894  		default:
   895  			d.logger.Printf("[ERR] driver.docker: unknown resource to cleanup: %q", key)
   896  		}
   897  	}
   898  	return structs.NewRecoverableError(merr.ErrorOrNil(), retry)
   899  }
   900  
   901  // cleanupImage removes a Docker image. No error is returned if the image
   902  // doesn't exist or is still in use. Requires the global client to already be
   903  // initialized.
   904  func (d *DockerDriver) cleanupImage(imageID string) error {
   905  	if !d.config.ReadBoolDefault(dockerCleanupImageConfigOption, dockerCleanupImageConfigDefault) {
   906  		// Config says not to cleanup
   907  		return nil
   908  	}
   909  
   910  	coordinator, callerID := d.getDockerCoordinator(client)
   911  	coordinator.RemoveImage(imageID, callerID)
   912  
   913  	return nil
   914  }
   915  
   916  // dockerClients creates two *docker.Client, one for long running operations and
   917  // the other for shorter operations. In test / dev mode we can use ENV vars to
   918  // connect to the docker daemon. In production mode we will read docker.endpoint
   919  // from the config file.
   920  func (d *DockerDriver) dockerClients() (*docker.Client, *docker.Client, error) {
   921  	if client != nil && waitClient != nil {
   922  		return client, waitClient, nil
   923  	}
   924  
   925  	var err error
   926  	var merr multierror.Error
   927  	createClients.Do(func() {
   928  		// Default to using whatever is configured in docker.endpoint. If this is
   929  		// not specified we'll fall back on NewClientFromEnv which reads config from
   930  		// the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and
   931  		// DOCKER_CERT_PATH. This allows us to lock down the config in production
   932  		// but also accept the standard ENV configs for dev and test.
   933  		dockerEndpoint := d.config.Read("docker.endpoint")
   934  		if dockerEndpoint != "" {
   935  			cert := d.config.Read("docker.tls.cert")
   936  			key := d.config.Read("docker.tls.key")
   937  			ca := d.config.Read("docker.tls.ca")
   938  
   939  			if cert+key+ca != "" {
   940  				d.logger.Printf("[DEBUG] driver.docker: using TLS client connection to %s", dockerEndpoint)
   941  				client, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
   942  				if err != nil {
   943  					merr.Errors = append(merr.Errors, err)
   944  				}
   945  				waitClient, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
   946  				if err != nil {
   947  					merr.Errors = append(merr.Errors, err)
   948  				}
   949  			} else {
   950  				d.logger.Printf("[DEBUG] driver.docker: using standard client connection to %s", dockerEndpoint)
   951  				client, err = docker.NewClient(dockerEndpoint)
   952  				if err != nil {
   953  					merr.Errors = append(merr.Errors, err)
   954  				}
   955  				waitClient, err = docker.NewClient(dockerEndpoint)
   956  				if err != nil {
   957  					merr.Errors = append(merr.Errors, err)
   958  				}
   959  			}
   960  			client.SetTimeout(dockerTimeout)
   961  			return
   962  		}
   963  
   964  		d.logger.Println("[DEBUG] driver.docker: using client connection initialized from environment")
   965  		client, err = docker.NewClientFromEnv()
   966  		if err != nil {
   967  			merr.Errors = append(merr.Errors, err)
   968  		}
   969  		client.SetTimeout(dockerTimeout)
   970  
   971  		waitClient, err = docker.NewClientFromEnv()
   972  		if err != nil {
   973  			merr.Errors = append(merr.Errors, err)
   974  		}
   975  	})
   976  	return client, waitClient, merr.ErrorOrNil()
   977  }
   978  
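        // containerBinds builds the list of host bind mounts for the container: the
        // shared alloc, task local, and secrets directories, plus any user-specified
        // volumes permitted by the client's docker.volumes configuration. A user bind
        // such as "relative/data:/container/path" (illustrative) is expanded relative
        // to the task directory unless a volume driver is set.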
   979  func (d *DockerDriver) containerBinds(driverConfig *DockerDriverConfig, taskDir *allocdir.TaskDir,
   980  	task *structs.Task) ([]string, error) {
   981  
   982  	allocDirBind := fmt.Sprintf("%s:%s", taskDir.SharedAllocDir, allocdir.SharedAllocContainerPath)
   983  	taskLocalBind := fmt.Sprintf("%s:%s", taskDir.LocalDir, allocdir.TaskLocalContainerPath)
   984  	secretDirBind := fmt.Sprintf("%s:%s", taskDir.SecretsDir, allocdir.TaskSecretsContainerPath)
   985  	binds := []string{allocDirBind, taskLocalBind, secretDirBind}
   986  
   987  	volumesEnabled := d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault)
   988  
   989  	if !volumesEnabled && driverConfig.VolumeDriver != "" {
   990  		return nil, fmt.Errorf("%s is false; cannot use volume driver %q", dockerVolumesConfigOption, driverConfig.VolumeDriver)
   991  	}
   992  
   993  	for _, userbind := range driverConfig.Volumes {
   994  		parts := strings.Split(userbind, ":")
   995  		if len(parts) < 2 {
   996  			return nil, fmt.Errorf("invalid docker volume: %q", userbind)
   997  		}
   998  
   999  		// Resolve dotted path segments
  1000  		parts[0] = filepath.Clean(parts[0])
  1001  
  1002  		// Absolute host paths are only allowed when volume mounting is enabled
  1003  		if filepath.IsAbs(parts[0]) {
  1004  			if !volumesEnabled {
  1005  				// Disallow mounting arbitrary absolute paths
  1006  				return nil, fmt.Errorf("%s is false; cannot mount host paths: %+q", dockerVolumesConfigOption, userbind)
  1007  			}
  1008  			binds = append(binds, userbind)
  1009  			continue
  1010  		}
  1011  
  1012  		// Relative paths are always allowed as they mount within a container
  1013  		// When a VolumeDriver is set, we assume we receive a binding in the format volume-name:container-dest
  1014  		// Otherwise, we assume we receive a relative path binding in the format relative/to/task:/also/in/container
  1015  		if driverConfig.VolumeDriver == "" {
  1016  			// Expand path relative to alloc dir
  1017  			parts[0] = filepath.Join(taskDir.Dir, parts[0])
  1018  		}
  1019  
  1020  		binds = append(binds, strings.Join(parts, ":"))
  1021  	}
  1022  
  1023  	if selinuxLabel := d.config.Read(dockerSELinuxLabelConfigOption); selinuxLabel != "" {
  1024  		// Apply SELinux Label to each volume
  1025  		for i := range binds {
  1026  			binds[i] = fmt.Sprintf("%s:%s", binds[i], selinuxLabel)
  1027  		}
  1028  	}
  1029  
  1030  	return binds, nil
  1031  }
  1032  
  1033  // createContainerConfig initializes a struct needed to call docker.client.CreateContainer()
  1034  func (d *DockerDriver) createContainerConfig(ctx *ExecContext, task *structs.Task,
  1035  	driverConfig *DockerDriverConfig, syslogAddr string) (docker.CreateContainerOptions, error) {
  1036  	var c docker.CreateContainerOptions
  1037  	if task.Resources == nil {
  1038  		// Guard against missing resources. We should never have been able to
  1039  		// schedule a job without specifying this.
  1040  		d.logger.Println("[ERR] driver.docker: task.Resources is empty")
  1041  		return c, fmt.Errorf("task.Resources is empty")
  1042  	}
  1043  
  1044  	binds, err := d.containerBinds(driverConfig, ctx.TaskDir, task)
  1045  	if err != nil {
  1046  		return c, err
  1047  	}
  1048  
  1049  	// create the config block that will later be consumed by go-dockerclient
  1050  	config := &docker.Config{
  1051  		Image:       d.imageID,
  1052  		Hostname:    driverConfig.Hostname,
  1053  		User:        task.User,
  1054  		Tty:         driverConfig.TTY,
  1055  		OpenStdin:   driverConfig.Interactive,
  1056  		StopTimeout: int(task.KillTimeout.Seconds()),
  1057  		StopSignal:  task.KillSignal,
  1058  	}
  1059  
  1060  	if driverConfig.WorkDir != "" {
  1061  		config.WorkingDir = driverConfig.WorkDir
  1062  	}
  1063  
  1064  	memLimit := int64(task.Resources.MemoryMB) * 1024 * 1024
  1065  
  1066  	if len(driverConfig.Logging) == 0 {
  1067  		if runtime.GOOS == "darwin" {
  1068  			d.logger.Printf("[DEBUG] driver.docker: deferring logging to docker on Docker for Mac")
  1069  		} else {
  1070  			d.logger.Printf("[DEBUG] driver.docker: Setting default logging options to syslog and %s", syslogAddr)
  1071  			driverConfig.Logging = []DockerLoggingOpts{
  1072  				{Type: "syslog", Config: map[string]string{"syslog-address": syslogAddr}},
  1073  			}
  1074  		}
  1075  	}
  1076  
  1077  	hostConfig := &docker.HostConfig{
  1078  		// Convert MB to bytes. This is an absolute value.
  1079  		Memory: memLimit,
  1080  		// Convert Mhz to shares. This is a relative value.
  1081  		CPUShares: int64(task.Resources.CPU),
  1082  
  1083  		// Binds are used to mount a host volume into the container. We mount a
  1084  		// local directory for storage and a shared alloc directory that can be
  1085  		// used to share data between different tasks in the same task group.
  1086  		Binds: binds,
  1087  
  1088  		VolumeDriver: driverConfig.VolumeDriver,
  1089  	}
  1090  
  1091  	// Windows does not support MemorySwap/MemorySwappiness #2193
  1092  	if runtime.GOOS == "windows" {
  1093  		hostConfig.MemorySwap = 0
  1094  		hostConfig.MemorySwappiness = -1
  1095  	} else {
  1096  		hostConfig.MemorySwap = memLimit // MemorySwap is memory + swap.
  1097  	}
  1098  
  1099  	if len(driverConfig.Logging) != 0 {
  1100  		d.logger.Printf("[DEBUG] driver.docker: Using config for logging: %+v", driverConfig.Logging[0])
  1101  		hostConfig.LogConfig = docker.LogConfig{
  1102  			Type:   driverConfig.Logging[0].Type,
  1103  			Config: driverConfig.Logging[0].Config,
  1104  		}
  1105  	}
  1106  
  1107  	d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Name)
  1108  	d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Name)
  1109  	d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s", hostConfig.Binds, task.Name)
  1110  
  1111  	//  set privileged mode
  1112  	hostPrivileged := d.config.ReadBoolDefault(dockerPrivilegedConfigOption, false)
  1113  	if driverConfig.Privileged && !hostPrivileged {
  1114  		return c, fmt.Errorf(`Docker privileged mode is disabled on this Nomad agent`)
  1115  	}
  1116  	hostConfig.Privileged = driverConfig.Privileged
  1117  
  1118  	// set SHM size
  1119  	if driverConfig.ShmSize != 0 {
  1120  		hostConfig.ShmSize = driverConfig.ShmSize
  1121  	}
  1122  
  1123  	// set DNS servers
  1124  	for _, ip := range driverConfig.DNSServers {
  1125  		if net.ParseIP(ip) != nil {
  1126  			hostConfig.DNS = append(hostConfig.DNS, ip)
  1127  		} else {
  1128  			d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s", ip)
  1129  		}
  1130  	}
  1131  
  1132  	if len(driverConfig.Devices) > 0 {
  1133  		var devices []docker.Device
  1134  		for _, device := range driverConfig.Devices {
  1135  			dev := docker.Device{
  1136  				PathOnHost:        device.HostPath,
  1137  				PathInContainer:   device.ContainerPath,
  1138  				CgroupPermissions: device.CgroupPermissions}
  1139  			devices = append(devices, dev)
  1140  		}
  1141  		hostConfig.Devices = devices
  1142  	}
  1143  
  1144  	// Setup mounts
  1145  	for _, m := range driverConfig.Mounts {
  1146  		hm := docker.HostMount{
  1147  			Target:   m.Target,
  1148  			Source:   m.Source,
  1149  			Type:     "volume", // Only type supported
  1150  			ReadOnly: m.ReadOnly,
  1151  		}
  1152  		if len(m.VolumeOptions) == 1 {
  1153  			vo := m.VolumeOptions[0]
  1154  			hm.VolumeOptions = &docker.VolumeOptions{
  1155  				NoCopy: vo.NoCopy,
  1156  			}
  1157  
  1158  			if len(vo.DriverConfig) == 1 {
  1159  				dc := vo.DriverConfig[0]
  1160  				hm.VolumeOptions.DriverConfig = docker.VolumeDriverConfig{
  1161  					Name: dc.Name,
  1162  				}
  1163  				if len(dc.Options) == 1 {
  1164  					hm.VolumeOptions.DriverConfig.Options = dc.Options[0]
  1165  				}
  1166  			}
  1167  			if len(vo.Labels) == 1 {
  1168  				hm.VolumeOptions.Labels = vo.Labels[0]
  1169  			}
  1170  		}
  1171  		hostConfig.Mounts = append(hostConfig.Mounts, hm)
  1172  	}
  1173  
  1174  	// set DNS search domains and extra hosts
  1175  	hostConfig.DNSSearch = driverConfig.DNSSearchDomains
  1176  	hostConfig.DNSOptions = driverConfig.DNSOptions
  1177  	hostConfig.ExtraHosts = driverConfig.ExtraHosts
  1178  
  1179  	hostConfig.IpcMode = driverConfig.IpcMode
  1180  	hostConfig.PidMode = driverConfig.PidMode
  1181  	hostConfig.UTSMode = driverConfig.UTSMode
  1182  	hostConfig.UsernsMode = driverConfig.UsernsMode
  1183  	hostConfig.SecurityOpt = driverConfig.SecurityOpt
  1184  	hostConfig.Sysctls = driverConfig.Sysctl
  1185  	hostConfig.Ulimits = driverConfig.Ulimit
  1186  
  1187  	hostConfig.NetworkMode = driverConfig.NetworkMode
  1188  	if hostConfig.NetworkMode == "" {
  1189  		// docker default
  1190  		d.logger.Printf("[DEBUG] driver.docker: networking mode not specified; defaulting to %s", defaultNetworkMode)
  1191  		hostConfig.NetworkMode = defaultNetworkMode
  1192  	}
  1193  
  1194  	// Setup port mapping and exposed ports
  1195  	if len(task.Resources.Networks) == 0 {
  1196  		d.logger.Println("[DEBUG] driver.docker: No network interfaces are available")
  1197  		if len(driverConfig.PortMap) > 0 {
  1198  			return c, fmt.Errorf("Trying to map ports but no network interface is available")
  1199  		}
  1200  	} else {
  1201  		// TODO add support for more than one network
  1202  		network := task.Resources.Networks[0]
  1203  		publishedPorts := map[docker.Port][]docker.PortBinding{}
  1204  		exposedPorts := map[docker.Port]struct{}{}
  1205  
  1206  		for _, port := range network.ReservedPorts {
  1207  			// By default we will map the allocated port 1:1 to the container
  1208  			containerPortInt := port.Value
  1209  
  1210  			// If the user has mapped a port using port_map we'll change it here
  1211  			if mapped, ok := driverConfig.PortMap[port.Label]; ok {
  1212  				containerPortInt = mapped
  1213  			}
  1214  
  1215  			hostPortStr := strconv.Itoa(port.Value)
  1216  			containerPort := docker.Port(strconv.Itoa(containerPortInt))
  1217  
  1218  			publishedPorts[containerPort+"/tcp"] = getPortBinding(network.IP, hostPortStr)
  1219  			publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr)
  1220  			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static)", network.IP, port.Value, containerPortInt)
  1221  
  1222  			exposedPorts[containerPort+"/tcp"] = struct{}{}
  1223  			exposedPorts[containerPort+"/udp"] = struct{}{}
  1224  			d.logger.Printf("[DEBUG] driver.docker: exposed port %d", containerPortInt)
  1225  		}
  1226  
  1227  		for _, port := range network.DynamicPorts {
  1228  			// By default we will map the allocated port 1:1 to the container
  1229  			containerPortInt := port.Value
  1230  
  1231  			// If the user has mapped a port using port_map we'll change it here
  1232  			if mapped, ok := driverConfig.PortMap[port.Label]; ok {
  1233  				containerPortInt = mapped
  1234  			}
  1235  
  1236  			hostPortStr := strconv.Itoa(port.Value)
  1237  			containerPort := docker.Port(strconv.Itoa(containerPortInt))
  1238  
  1239  			publishedPorts[containerPort+"/tcp"] = getPortBinding(network.IP, hostPortStr)
  1240  			publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr)
  1241  			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)", network.IP, port.Value, containerPortInt)
  1242  
  1243  			exposedPorts[containerPort+"/tcp"] = struct{}{}
  1244  			exposedPorts[containerPort+"/udp"] = struct{}{}
  1245  			d.logger.Printf("[DEBUG] driver.docker: exposed port %s", containerPort)
  1246  		}
  1247  
  1248  		hostConfig.PortBindings = publishedPorts
  1249  		config.ExposedPorts = exposedPorts
  1250  	}
  1251  
  1252  	parsedArgs := ctx.TaskEnv.ParseAndReplace(driverConfig.Args)
  1253  
  1254  	// If the user specified a custom command to run, we'll inject it here.
  1255  	if driverConfig.Command != "" {
  1256  		// Validate command
  1257  		if err := validateCommand(driverConfig.Command, "args"); err != nil {
  1258  			return c, err
  1259  		}
  1260  
  1261  		cmd := []string{driverConfig.Command}
  1262  		if len(driverConfig.Args) != 0 {
  1263  			cmd = append(cmd, parsedArgs...)
  1264  		}
  1265  		d.logger.Printf("[DEBUG] driver.docker: setting container startup command to: %s", strings.Join(cmd, " "))
  1266  		config.Cmd = cmd
  1267  	} else if len(driverConfig.Args) != 0 {
  1268  		config.Cmd = parsedArgs
  1269  	}
  1270  
  1271  	if len(driverConfig.Labels) > 0 {
  1272  		config.Labels = driverConfig.Labels
  1273  		d.logger.Printf("[DEBUG] driver.docker: applied labels on the container: %+v", config.Labels)
  1274  	}
  1275  
  1276  	config.Env = ctx.TaskEnv.List()
  1277  
  1278  	containerName := fmt.Sprintf("%s-%s", task.Name, d.DriverContext.allocID)
  1279  	d.logger.Printf("[DEBUG] driver.docker: setting container name to: %s", containerName)
  1280  
  1281  	var networkingConfig *docker.NetworkingConfig
  1282  	if len(driverConfig.NetworkAliases) > 0 || driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" {
  1283  		networkingConfig = &docker.NetworkingConfig{
  1284  			EndpointsConfig: map[string]*docker.EndpointConfig{
  1285  				hostConfig.NetworkMode: {},
  1286  			},
  1287  		}
  1288  	}
  1289  
  1290  	if len(driverConfig.NetworkAliases) > 0 {
  1291  		networkingConfig.EndpointsConfig[hostConfig.NetworkMode].Aliases = driverConfig.NetworkAliases
  1292  		d.logger.Printf("[DEBUG] driver.docker: using network_mode %q with network aliases: %v",
  1293  			hostConfig.NetworkMode, strings.Join(driverConfig.NetworkAliases, ", "))
  1294  	}
  1295  
  1296  	if driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" {
  1297  		networkingConfig.EndpointsConfig[hostConfig.NetworkMode].IPAMConfig = &docker.EndpointIPAMConfig{
  1298  			IPv4Address: driverConfig.IPv4Address,
  1299  			IPv6Address: driverConfig.IPv6Address,
  1300  		}
  1301  		d.logger.Printf("[DEBUG] driver.docker: using network_mode %q with ipv4: %q and ipv6: %q",
  1302  			hostConfig.NetworkMode, driverConfig.IPv4Address, driverConfig.IPv6Address)
  1303  	}
  1304  
  1305  	if driverConfig.MacAddress != "" {
  1306  		config.MacAddress = driverConfig.MacAddress
  1307  		d.logger.Printf("[DEBUG] driver.docker: using pinned mac address: %q", config.MacAddress)
  1308  	}
  1309  
  1310  	return docker.CreateContainerOptions{
  1311  		Name:             containerName,
  1312  		Config:           config,
  1313  		HostConfig:       hostConfig,
  1314  		NetworkingConfig: networkingConfig,
  1315  	}, nil
  1316  }
  1317  
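        // Periodic reports that the docker fingerprint should be rerun
        // periodically, every 15 seconds.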
  1318  func (d *DockerDriver) Periodic() (bool, time.Duration) {
  1319  	return true, 15 * time.Second
  1320  }
  1321  
  1322  // createImage creates a docker image either by pulling it from a registry or by
  1323  // loading it from the file system
  1324  func (d *DockerDriver) createImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir *allocdir.TaskDir) (string, error) {
  1325  	image := driverConfig.ImageName
  1326  	repo, tag := docker.ParseRepositoryTag(image)
  1327  	if tag == "" {
  1328  		tag = "latest"
  1329  	}
  1330  
  1331  	coordinator, callerID := d.getDockerCoordinator(client)
  1332  
  1333  	// We're going to check whether the image is already downloaded. If the tag
  1334  	// is "latest", or ForcePull is set, we have to check for a new version every time so we don't
  1335  	// bother to check and cache the id here. We'll download first, then cache.
  1336  	if driverConfig.ForcePull {
  1337  		d.logger.Printf("[DEBUG] driver.docker: force pull image '%s:%s' instead of inspecting local", repo, tag)
  1338  	} else if tag != "latest" {
  1339  		if dockerImage, _ := client.InspectImage(image); dockerImage != nil {
  1340  			// Image exists so just increment its reference count
  1341  			coordinator.IncrementImageReference(dockerImage.ID, image, callerID)
  1342  			return dockerImage.ID, nil
  1343  		}
  1344  	}
  1345  
  1346  	// Load the image if specified
  1347  	if driverConfig.LoadImage != "" {
  1348  		return d.loadImage(driverConfig, client, taskDir)
  1349  	}
  1350  
  1351  	// Download the image
  1352  	return d.pullImage(driverConfig, client, repo, tag)
  1353  }
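
// splitImageExample is an illustrative, unused sketch (not part of the
// original driver and never called): it shows how image references are split
// before the cache check in createImage above. A bare reference such as
// "redis" yields an empty tag and is treated as the mutable "latest" tag,
// which is why such images skip the local cache and are pulled every time.
func splitImageExample(image string) (repo, tag string) {
	repo, tag = docker.ParseRepositoryTag(image) // "redis:3.2" -> ("redis", "3.2")
	if tag == "" {
		tag = "latest"
	}
	return repo, tag
}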
  1354  
  1355  // pullImage creates an image by pulling it from a docker registry
  1356  func (d *DockerDriver) pullImage(driverConfig *DockerDriverConfig, client *docker.Client, repo, tag string) (id string, err error) {
  1357  	authOptions, err := d.resolveRegistryAuthentication(driverConfig, repo)
  1358  	if err != nil {
  1359  		if d.driverConfig.AuthSoftFail {
  1360  			d.logger.Printf("[WARN] Failed to find docker auth for repo %q: %v", repo, err)
  1361  		} else {
  1362  			return "", fmt.Errorf("Failed to find docker auth for repo %q: %v", repo, err)
  1363  		}
  1364  	}
  1365  
  1366  	if authIsEmpty(authOptions) {
  1367  		d.logger.Printf("[DEBUG] driver.docker: did not find docker auth for repo %q", repo)
  1368  	}
  1369  
  1370  	d.emitEvent("Downloading image %s:%s", repo, tag)
  1371  	coordinator, callerID := d.getDockerCoordinator(client)
  1372  	return coordinator.PullImage(driverConfig.ImageName, authOptions, callerID)
  1373  }
  1374  
  1375  // authBackend encapsulates a function that resolves registry credentials.
  1376  type authBackend func(string) (*docker.AuthConfiguration, error)
  1377  
  1378  // resolveRegistryAuthentication attempts to retrieve auth credentials for the
  1379  // repo, trying each supported authentication backend in order.
  1380  func (d *DockerDriver) resolveRegistryAuthentication(driverConfig *DockerDriverConfig, repo string) (*docker.AuthConfiguration, error) {
  1381  	return firstValidAuth(repo, []authBackend{
  1382  		authFromTaskConfig(driverConfig),
  1383  		authFromDockerConfig(d.config.Read("docker.auth.config")),
  1384  		authFromHelper(d.config.Read("docker.auth.helper")),
  1385  	})
  1386  }
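
// staticAuthExample is an illustrative, unused sketch (not part of the
// original driver and never called): it shows the authBackend contract used by
// resolveRegistryAuthentication above. A backend returns (nil, nil) to signal
// "no credentials here" so that firstValidAuth falls through to the next
// backend; the username and password are placeholders.
func staticAuthExample(username, password string) authBackend {
	return func(repo string) (*docker.AuthConfiguration, error) {
		if username == "" {
			return nil, nil // nothing configured; try the next backend
		}
		return &docker.AuthConfiguration{Username: username, Password: password}, nil
	}
}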
  1387  
  1388  // loadImage creates an image by loading it from the file system
  1389  func (d *DockerDriver) loadImage(driverConfig *DockerDriverConfig, client *docker.Client,
  1390  	taskDir *allocdir.TaskDir) (id string, err error) {
  1391  
  1392  	archive := filepath.Join(taskDir.LocalDir, driverConfig.LoadImage)
  1393  	d.logger.Printf("[DEBUG] driver.docker: loading image from: %v", archive)
  1394  
  1395  	f, err := os.Open(archive)
  1396  	if err != nil {
  1397  		return "", fmt.Errorf("unable to open image archive: %v", err)
  1398  	}
  1399  	defer f.Close() // close the archive even if loading fails below
  1400  
  1401  	if err := client.LoadImage(docker.LoadImageOptions{InputStream: f}); err != nil {
  1402  		return "", err
  1403  	}
  1404  
  1405  	dockerImage, err := client.InspectImage(driverConfig.ImageName)
  1406  	if err != nil {
  1407  		return "", recoverableErrTimeouts(err)
  1408  	}
  1409  
  1410  	coordinator, callerID := d.getDockerCoordinator(client)
  1411  	coordinator.IncrementImageReference(dockerImage.ID, driverConfig.ImageName, callerID)
  1412  	return dockerImage.ID, nil
  1413  }
  1414  
  1415  // createContainer creates the container given the passed configuration. It
  1416  // attempts to handle any transient Docker errors.
  1417  func (d *DockerDriver) createContainer(config docker.CreateContainerOptions) (*docker.Container, error) {
  1418  	// Create a container
  1419  	attempted := 0
  1420  CREATE:
  1421  	container, createErr := client.CreateContainer(config)
  1422  	if createErr == nil {
  1423  		return container, nil
  1424  	}
  1425  
  1426  	d.logger.Printf("[DEBUG] driver.docker: failed to create container %q from image %q (ID: %q) (attempt %d): %v",
  1427  		config.Name, d.driverConfig.ImageName, d.imageID, attempted+1, createErr)
  1428  	if strings.Contains(strings.ToLower(createErr.Error()), "container already exists") {
  1429  		containers, err := client.ListContainers(docker.ListContainersOptions{
  1430  			All: true,
  1431  		})
  1432  		if err != nil {
  1433  			d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name: %s", config.Name)
  1434  			return nil, recoverableErrTimeouts(fmt.Errorf("Failed to query list of containers: %s", err))
  1435  		}
  1436  
  1437  		// Delete matching containers
  1438  		// Prefix the name with "/" since Docker returns container names with a
  1439  		// "/" prepended to the Nomad-generated names.
  1440  		containerName := "/" + config.Name
  1441  		d.logger.Printf("[DEBUG] driver.docker: searching for container name %q to purge", containerName)
  1442  		for _, shimContainer := range containers {
  1443  			d.logger.Printf("[DEBUG] driver.docker: listed container %+v", shimContainer.Names)
  1444  			found := false
  1445  			for _, name := range shimContainer.Names {
  1446  				if name == containerName {
  1447  					d.logger.Printf("[DEBUG] driver.docker: Found container %v: %v", containerName, shimContainer.ID)
  1448  					found = true
  1449  					break
  1450  				}
  1451  			}
  1452  
  1453  			if !found {
  1454  				continue
  1455  			}
  1456  
  1457  			// Inspect the container; if it is still running, return it instead of
  1458  			// purging and recreating it.
  1459  			container, err := client.InspectContainer(shimContainer.ID)
  1460  			if err != nil {
  1461  				err = fmt.Errorf("Failed to inspect container %s: %s", shimContainer.ID, err)
  1462  
  1463  				// This error is always recoverable as it could
  1464  				// be caused by races between listing
  1465  				// containers and this container being removed.
  1466  				// See #2802
  1467  				return nil, structs.NewRecoverableError(err, true)
  1468  			}
  1469  			if container != nil && container.State.Running {
  1470  				return container, nil
  1471  			}
  1472  
  1473  			err = client.RemoveContainer(docker.RemoveContainerOptions{
  1474  				ID:    container.ID,
  1475  				Force: true,
  1476  			})
  1477  			if err != nil {
  1478  				d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID)
  1479  				return nil, recoverableErrTimeouts(fmt.Errorf("Failed to purge container %s: %s", container.ID, err))
  1480  			} else {
  1481  				d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID)
  1482  			}
  1483  		}
  1484  
  1485  		if attempted < 5 {
  1486  			attempted++
  1487  			time.Sleep(1 * time.Second)
  1488  			goto CREATE
  1489  		}
  1490  	} else if strings.Contains(strings.ToLower(createErr.Error()), "no such image") {
  1491  		// There is still a very small chance of hitting this race even with the
  1492  		// image coordinator, so mark the error recoverable and retry.
  1493  		return nil, structs.NewRecoverableError(createErr, true)
  1494  	}
  1495  
  1496  	return nil, recoverableErrTimeouts(createErr)
  1497  }
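
// purgedNameExample is an illustrative, unused sketch (not part of the
// original driver and never called): it shows the name matching used during
// the "container already exists" recovery above. Docker lists container names
// with a leading "/", so the Nomad-generated name is prefixed before comparing.
func purgedNameExample(listedNames []string, nomadName string) bool {
	want := "/" + nomadName
	for _, name := range listedNames {
		if name == want {
			return true
		}
	}
	return false
}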
  1498  
  1499  // startContainer starts the passed container. It attempts to handle any
  1500  // transient Docker errors.
  1501  func (d *DockerDriver) startContainer(c *docker.Container) error {
  1502  	// Start a container
  1503  	attempted := 0
  1504  START:
  1505  	startErr := client.StartContainer(c.ID, c.HostConfig)
  1506  	if startErr == nil {
  1507  		return nil
  1508  	}
  1509  
  1510  	d.logger.Printf("[DEBUG] driver.docker: failed to start container %q (attempt %d): %v", c.ID, attempted+1, startErr)
  1511  
  1512  	// If it is a 500 error it is likely we can retry and be successful
  1513  	if strings.Contains(startErr.Error(), "API error (500)") {
  1514  		if attempted < 5 {
  1515  			attempted++
  1516  			time.Sleep(1 * time.Second)
  1517  			goto START
  1518  		}
  1519  		return structs.NewRecoverableError(startErr, true)
  1520  	}
  1521  
  1522  	return recoverableErrTimeouts(startErr)
  1523  }
  1524  
  1525  func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
  1526  	// Split the handle
  1527  	pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:"))
  1528  	pid := &dockerPID{}
  1529  	if err := json.Unmarshal(pidBytes, pid); err != nil {
  1530  		return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
  1531  	}
  1532  	d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s", pid.ContainerID)
  1533  	d.logger.Printf("[DEBUG] driver.docker: re-attached to handle: %s", handleID)
  1534  	pluginConfig := &plugin.ClientConfig{
  1535  		Reattach: pid.PluginConfig.PluginConfig(),
  1536  	}
  1537  
  1538  	client, waitClient, err := d.dockerClients()
  1539  	if err != nil {
  1540  		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
  1541  	}
  1542  
  1543  	// Look for a running container with this ID
  1544  	containers, err := client.ListContainers(docker.ListContainersOptions{
  1545  		Filters: map[string][]string{
  1546  			"id": {pid.ContainerID},
  1547  		},
  1548  	})
  1549  	if err != nil {
  1550  		return nil, fmt.Errorf("Failed to query for container %s: %v", pid.ContainerID, err)
  1551  	}
  1552  
  1553  	found := false
  1554  	for _, container := range containers {
  1555  		if container.ID == pid.ContainerID {
  1556  			found = true
  1557  		}
  1558  	}
  1559  	if !found {
  1560  		return nil, fmt.Errorf("Failed to find container %s", pid.ContainerID)
  1561  	}
  1562  	exec, pluginClient, err := createExecutorWithConfig(pluginConfig, d.config.LogOutput)
  1563  	if err != nil {
  1564  		d.logger.Printf("[INFO] driver.docker: couldn't re-attach to the plugin process: %v", err)
  1565  		d.logger.Printf("[DEBUG] driver.docker: stopping container %q", pid.ContainerID)
  1566  		if e := client.StopContainer(pid.ContainerID, uint(pid.KillTimeout.Seconds())); e != nil {
  1567  			d.logger.Printf("[DEBUG] driver.docker: couldn't stop container: %v", e)
  1568  		}
  1569  		return nil, err
  1570  	}
  1571  
  1572  	ver, _ := exec.Version()
  1573  	d.logger.Printf("[DEBUG] driver.docker: version of executor: %v", ver.Version)
  1574  
  1575  	// Increment the reference count since we successfully attached to this
  1576  	// container
  1577  	coordinator, callerID := d.getDockerCoordinator(client)
  1578  	coordinator.IncrementImageReference(pid.ImageID, pid.Image, callerID)
  1579  
  1580  	// Return a driver handle
  1581  	h := &DockerHandle{
  1582  		client:         client,
  1583  		waitClient:     waitClient,
  1584  		executor:       exec,
  1585  		pluginClient:   pluginClient,
  1586  		logger:         d.logger,
  1587  		Image:          pid.Image,
  1588  		ImageID:        pid.ImageID,
  1589  		containerID:    pid.ContainerID,
  1590  		version:        pid.Version,
  1591  		killTimeout:    pid.KillTimeout,
  1592  		maxKillTimeout: pid.MaxKillTimeout,
  1593  		doneCh:         make(chan bool),
  1594  		waitCh:         make(chan *dstructs.WaitResult, 1),
  1595  	}
  1596  	go h.collectStats()
  1597  	go h.run()
  1598  	return h, nil
  1599  }
  1600  
  1601  func (h *DockerHandle) ID() string {
  1602  	// Return a handle to the PID
  1603  	pid := dockerPID{
  1604  		Version:        h.version,
  1605  		ContainerID:    h.containerID,
  1606  		Image:          h.Image,
  1607  		ImageID:        h.ImageID,
  1608  		KillTimeout:    h.killTimeout,
  1609  		MaxKillTimeout: h.maxKillTimeout,
  1610  		PluginConfig:   NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
  1611  	}
  1612  	data, err := json.Marshal(pid)
  1613  	if err != nil {
  1614  		h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s", err)
  1615  	}
  1616  	return fmt.Sprintf("DOCKER:%s", string(data))
  1617  }
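
// decodeHandleExample is an illustrative, unused sketch (not part of the
// original driver and never called): it shows the round trip between ID above
// and Open. A handle is the literal prefix "DOCKER:" followed by the
// JSON-encoded dockerPID, so it can be persisted and later used to re-attach.
func decodeHandleExample(handleID string) (*dockerPID, error) {
	pid := &dockerPID{}
	raw := strings.TrimPrefix(handleID, "DOCKER:")
	if err := json.Unmarshal([]byte(raw), pid); err != nil {
		return nil, err
	}
	return pid, nil
}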
  1618  
  1619  func (h *DockerHandle) ContainerID() string {
  1620  	return h.containerID
  1621  }
  1622  
  1623  func (h *DockerHandle) WaitCh() chan *dstructs.WaitResult {
  1624  	return h.waitCh
  1625  }
  1626  
  1627  func (h *DockerHandle) Update(task *structs.Task) error {
  1628  	// Store the updated kill timeout.
  1629  	h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
  1630  	if err := h.executor.UpdateTask(task); err != nil {
  1631  		h.logger.Printf("[DEBUG] driver.docker: failed to update log config: %v", err)
  1632  	}
  1633  
  1634  	// No other task updates are supported.
  1635  	return nil
  1636  }
  1637  
  1638  func (h *DockerHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
  1639  	fullCmd := make([]string, len(args)+1)
  1640  	fullCmd[0] = cmd
  1641  	copy(fullCmd[1:], args)
  1642  	createExecOpts := docker.CreateExecOptions{
  1643  		AttachStdin:  false,
  1644  		AttachStdout: true,
  1645  		AttachStderr: true,
  1646  		Tty:          false,
  1647  		Cmd:          fullCmd,
  1648  		Container:    h.containerID,
  1649  		Context:      ctx,
  1650  	}
  1651  	exec, err := h.client.CreateExec(createExecOpts)
  1652  	if err != nil {
  1653  		return nil, 0, err
  1654  	}
  1655  
  1656  	output, _ := circbuf.NewBuffer(int64(dstructs.CheckBufSize))
  1657  	startOpts := docker.StartExecOptions{
  1658  		Detach:       false,
  1659  		Tty:          false,
  1660  		OutputStream: output,
  1661  		ErrorStream:  output,
  1662  		Context:      ctx,
  1663  	}
  1664  	if err := h.client.StartExec(exec.ID, startOpts); err != nil {
  1665  		return nil, 0, err
  1666  	}
  1667  	res, err := h.client.InspectExec(exec.ID)
  1668  	if err != nil {
  1669  		return output.Bytes(), 0, err
  1670  	}
  1671  	return output.Bytes(), res.ExitCode, nil
  1672  }
  1673  
  1674  func (h *DockerHandle) Signal(s os.Signal) error {
  1675  	// Convert types
  1676  	sysSig, ok := s.(syscall.Signal)
  1677  	if !ok {
  1678  		return fmt.Errorf("Failed to determine signal number")
  1679  	}
  1680  
  1681  	// TODO: When we expose signals we will need a mapping layer that converts
  1682  	// macOS signals to the correct signal number for docker, or we could change
  1683  	// the interface to take a signal string and leave the mapping to each driver.
  1684  
  1685  	dockerSignal := docker.Signal(sysSig)
  1686  	opts := docker.KillContainerOptions{
  1687  		ID:     h.containerID,
  1688  		Signal: dockerSignal,
  1689  	}
  1690  	return h.client.KillContainer(opts)
  1691  
  1692  }
  1693  
  1694  // Kill is used to terminate the task. This uses `docker stop -t killTimeout`
  1695  func (h *DockerHandle) Kill() error {
  1696  	// Stop the container
  1697  	err := h.client.StopContainer(h.containerID, uint(h.killTimeout.Seconds()))
  1698  	if err != nil {
  1699  		h.executor.Exit()
  1700  		h.pluginClient.Kill()
  1701  
  1702  		// Container has already been removed.
  1703  		if strings.Contains(err.Error(), NoSuchContainerError) {
  1704  			h.logger.Printf("[DEBUG] driver.docker: attempted to stop non-existent container %s", h.containerID)
  1705  			return nil
  1706  		}
  1707  		h.logger.Printf("[ERR] driver.docker: failed to stop container %s: %v", h.containerID, err)
  1708  		return fmt.Errorf("Failed to stop container %s: %s", h.containerID, err)
  1709  	}
  1710  	h.logger.Printf("[INFO] driver.docker: stopped container %s", h.containerID)
  1711  	return nil
  1712  }
  1713  
  1714  func (h *DockerHandle) Stats() (*cstructs.TaskResourceUsage, error) {
  1715  	h.resourceUsageLock.RLock()
  1716  	defer h.resourceUsageLock.RUnlock()
  1717  	var err error
  1718  	if h.resourceUsage == nil {
  1719  		err = fmt.Errorf("stats collection hasn't started yet")
  1720  	}
  1721  	return h.resourceUsage, err
  1722  }
  1723  
  1724  func (h *DockerHandle) run() {
  1725  	// Wait for the container to exit.
  1726  	exitCode, werr := h.waitClient.WaitContainer(h.containerID)
  1727  	if werr != nil {
  1728  		h.logger.Printf("[ERR] driver.docker: failed to wait for %s: %v", h.containerID, werr)
  1729  	}
  1730  
  1731  	if exitCode != 0 {
  1732  		werr = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode)
  1733  	}
  1734  
  1735  	container, ierr := h.waitClient.InspectContainer(h.containerID)
  1736  	if ierr != nil {
  1737  		h.logger.Printf("[ERR] driver.docker: failed to inspect container %s: %v", h.containerID, ierr)
  1738  	} else if container.State.OOMKilled {
  1739  		werr = fmt.Errorf("OOM Killed")
  1740  	}
  1741  
  1742  	close(h.doneCh)
  1743  
  1744  	// Shutdown the syslog collector
  1745  	if err := h.executor.Exit(); err != nil {
  1746  		h.logger.Printf("[ERR] driver.docker: failed to kill the syslog collector: %v", err)
  1747  	}
  1748  	h.pluginClient.Kill()
  1749  
  1750  	// Stop the container just in case the docker daemon's wait returned
  1751  	// incorrectly.
  1752  	if err := h.client.StopContainer(h.containerID, 0); err != nil {
  1753  		_, noSuchContainer := err.(*docker.NoSuchContainer)
  1754  		_, containerNotRunning := err.(*docker.ContainerNotRunning)
  1755  		if !containerNotRunning && !noSuchContainer {
  1756  			h.logger.Printf("[ERR] driver.docker: error stopping container: %v", err)
  1757  		}
  1758  	}
  1759  
  1760  	// Remove the container
  1761  	if err := h.client.RemoveContainer(docker.RemoveContainerOptions{ID: h.containerID, RemoveVolumes: true, Force: true}); err != nil {
  1762  		h.logger.Printf("[ERR] driver.docker: error removing container: %v", err)
  1763  	}
  1764  
  1765  	// Send the results
  1766  	h.waitCh <- dstructs.NewWaitResult(exitCode, 0, werr)
  1767  	close(h.waitCh)
  1768  }
  1769  
  1770  // collectStats starts collecting resource usage stats of a docker container
  1771  func (h *DockerHandle) collectStats() {
  1772  	statsCh := make(chan *docker.Stats)
  1773  	statsOpts := docker.StatsOptions{ID: h.containerID, Done: h.doneCh, Stats: statsCh, Stream: true}
  1774  	go func() {
  1775  		// TODO: handle Stats error
  1776  		if err := h.waitClient.Stats(statsOpts); err != nil {
  1777  			h.logger.Printf("[DEBUG] driver.docker: error collecting stats from container %s: %v", h.containerID, err)
  1778  		}
  1779  	}()
  1780  	numCores := runtime.NumCPU()
  1781  	for {
  1782  		select {
  1783  		case s := <-statsCh:
  1784  			if s != nil {
  1785  				ms := &cstructs.MemoryStats{
  1786  					RSS:      s.MemoryStats.Stats.Rss,
  1787  					Cache:    s.MemoryStats.Stats.Cache,
  1788  					Swap:     s.MemoryStats.Stats.Swap,
  1789  					MaxUsage: s.MemoryStats.MaxUsage,
  1790  					Measured: DockerMeasuredMemStats,
  1791  				}
  1792  
  1793  				cs := &cstructs.CpuStats{
  1794  					ThrottledPeriods: s.CPUStats.ThrottlingData.ThrottledPeriods,
  1795  					ThrottledTime:    s.CPUStats.ThrottlingData.ThrottledTime,
  1796  					Measured:         DockerMeasuredCpuStats,
  1797  				}
  1798  
  1799  				// Calculate percentage
  1800  				cs.Percent = calculatePercent(
  1801  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage,
  1802  					s.CPUStats.SystemCPUUsage, s.PreCPUStats.SystemCPUUsage, numCores)
  1803  				cs.SystemMode = calculatePercent(
  1804  					s.CPUStats.CPUUsage.UsageInKernelmode, s.PreCPUStats.CPUUsage.UsageInKernelmode,
  1805  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, numCores)
  1806  				cs.UserMode = calculatePercent(
  1807  					s.CPUStats.CPUUsage.UsageInUsermode, s.PreCPUStats.CPUUsage.UsageInUsermode,
  1808  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, numCores)
  1809  				cs.TotalTicks = (cs.Percent / 100) * shelpers.TotalTicksAvailable() / float64(numCores)
  1810  
  1811  				h.resourceUsageLock.Lock()
  1812  				h.resourceUsage = &cstructs.TaskResourceUsage{
  1813  					ResourceUsage: &cstructs.ResourceUsage{
  1814  						MemoryStats: ms,
  1815  						CpuStats:    cs,
  1816  					},
  1817  					Timestamp: s.Read.UTC().UnixNano(),
  1818  				}
  1819  				h.resourceUsageLock.Unlock()
  1820  			}
  1821  		case <-h.doneCh:
  1822  			return
  1823  		}
  1824  	}
  1825  }
  1826  
  1827  func calculatePercent(newSample, oldSample, newTotal, oldTotal uint64, cores int) float64 {
  1828  	// The deltas are unsigned counters; guard against wraparound (e.g. after a
  1829  	// counter reset) before subtracting.
  1830  	if newSample <= oldSample || newTotal <= oldTotal {
  1831  		return 0.0
  1832  	}
  1833  	numerator, denom := newSample - oldSample, newTotal - oldTotal
  1834  	return (float64(numerator) / float64(denom)) * float64(cores) * 100.0
  1835  }
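
// percentExample is an illustrative, unused sketch (not part of the original
// driver and never called): a worked instance of calculatePercent above. If a
// container consumed 200ms of CPU time while the host consumed 1s across 4
// cores, the result is (0.2 / 1.0) * 4 * 100 = 80, i.e. 80% of a single core.
func percentExample() float64 {
	var containerDelta, systemDelta uint64 = 200000000, 1000000000 // nanoseconds
	return calculatePercent(containerDelta, 0, systemDelta, 0, 4)
}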
  1836  
  1837  // loadDockerConfig loads the docker config at the specified path, returning an
  1838  // error if it couldn't be read.
  1839  func loadDockerConfig(file string) (*configfile.ConfigFile, error) {
  1840  	f, err := os.Open(file)
  1841  	if err != nil {
  1842  		return nil, fmt.Errorf("Failed to open auth config file: %v, error: %v", file, err)
  1843  	}
  1844  	defer f.Close()
  1845  
  1846  	cfile := new(configfile.ConfigFile)
  1847  	if err = cfile.LoadFromReader(f); err != nil {
  1848  		return nil, fmt.Errorf("Failed to parse auth config file: %v", err)
  1849  	}
  1850  	return cfile, nil
  1851  }
  1852  
  1853  // parseRepositoryInfo takes a repo and returns the Docker RepositoryInfo. This
  1854  // is useful for interacting with a Docker config object.
  1855  func parseRepositoryInfo(repo string) (*registry.RepositoryInfo, error) {
  1856  	name, err := reference.ParseNamed(repo)
  1857  	if err != nil {
  1858  		return nil, fmt.Errorf("Failed to parse named repo %q: %v", repo, err)
  1859  	}
  1860  
  1861  	repoInfo, err := registry.ParseRepositoryInfo(name)
  1862  	if err != nil {
  1863  		return nil, fmt.Errorf("Failed to parse repository: %v", err)
  1864  	}
  1865  
  1866  	return repoInfo, nil
  1867  }
  1868  
  1869  // firstValidAuth tries a list of auth backends, returning the first non-nil AuthConfiguration or error encountered
  1870  func firstValidAuth(repo string, backends []authBackend) (*docker.AuthConfiguration, error) {
  1871  	for _, backend := range backends {
  1872  		auth, err := backend(repo)
  1873  		if auth != nil || err != nil {
  1874  			return auth, err
  1875  		}
  1876  	}
  1877  	return nil, nil
  1878  }
  1879  
  1880  // authFromTaskConfig generates an authBackend for any auth given in the task-configuration
  1881  func authFromTaskConfig(driverConfig *DockerDriverConfig) authBackend {
  1882  	return func(string) (*docker.AuthConfiguration, error) {
  1883  		if len(driverConfig.Auth) == 0 {
  1884  			return nil, nil
  1885  		}
  1886  		auth := driverConfig.Auth[0]
  1887  		return &docker.AuthConfiguration{
  1888  			Username:      auth.Username,
  1889  			Password:      auth.Password,
  1890  			Email:         auth.Email,
  1891  			ServerAddress: auth.ServerAddress,
  1892  		}, nil
  1893  	}
  1894  }
  1895  
  1896  // authFromDockerConfig generates an authBackend for a dockercfg-compatible file.
  1897  // The credentials can come either from explicit auth definitions in the file or
  1898  // from the credential helpers it references.
  1899  func authFromDockerConfig(file string) authBackend {
  1900  	return func(repo string) (*docker.AuthConfiguration, error) {
  1901  		if file == "" {
  1902  			return nil, nil
  1903  		}
  1904  		repoInfo, err := parseRepositoryInfo(repo)
  1905  		if err != nil {
  1906  			return nil, err
  1907  		}
  1908  
  1909  		cfile, err := loadDockerConfig(file)
  1910  		if err != nil {
  1911  			return nil, err
  1912  		}
  1913  
  1914  		return firstValidAuth(repo, []authBackend{
  1915  			func(string) (*docker.AuthConfiguration, error) {
  1916  				dockerAuthConfig := registry.ResolveAuthConfig(cfile.AuthConfigs, repoInfo.Index)
  1917  				auth := &docker.AuthConfiguration{
  1918  					Username:      dockerAuthConfig.Username,
  1919  					Password:      dockerAuthConfig.Password,
  1920  					Email:         dockerAuthConfig.Email,
  1921  					ServerAddress: dockerAuthConfig.ServerAddress,
  1922  				}
  1923  				if authIsEmpty(auth) {
  1924  					return nil, nil
  1925  				}
  1926  				return auth, nil
  1927  			},
  1928  			authFromHelper(cfile.CredentialHelpers[registry.GetAuthConfigKey(repoInfo.Index)]),
  1929  			authFromHelper(cfile.CredentialsStore),
  1930  		})
  1931  	}
  1932  }
  1933  
  1934  // authFromHelper generates an authBackend for a docker credential helper: an
  1935  // executable that takes the requested registry domain on stdin and prints JSON
  1936  // with "Username" and "Secret" fields on stdout.
  1937  func authFromHelper(helperName string) authBackend {
  1938  	return func(repo string) (*docker.AuthConfiguration, error) {
  1939  		if helperName == "" {
  1940  			return nil, nil
  1941  		}
  1942  		helper := dockerAuthHelperPrefix + helperName
  1943  		cmd := exec.Command(helper, "get")
  1944  
  1945  		// Ensure that the "https://" prefix exists
  1946  		if !strings.HasPrefix(repo, "https://") {
  1947  			repo = fmt.Sprintf("https://%s", repo)
  1948  		}
  1949  
  1950  		cmd.Stdin = strings.NewReader(repo)
  1951  
  1952  		output, err := cmd.Output()
  1953  		if err != nil {
  1954  			switch err.(type) {
  1955  			default:
  1956  				return nil, err
  1957  			case *exec.ExitError:
  1958  				return nil, fmt.Errorf("%s with input %q failed with output: %s", helper, repo, output)
  1959  			}
  1960  		}
  1961  
  1962  		var response map[string]string
  1963  		if err := json.Unmarshal(output, &response); err != nil {
  1964  			return nil, err
  1965  		}
  1966  
  1967  		auth := &docker.AuthConfiguration{
  1968  			Username: response["Username"],
  1969  			Password: response["Secret"],
  1970  		}
  1971  
  1972  		if authIsEmpty(auth) {
  1973  			return nil, nil
  1974  		}
  1975  		return auth, nil
  1976  	}
  1977  }
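
// helperOutputExample is an illustrative, unused sketch (not part of the
// original driver and never called): it decodes the kind of JSON a docker
// credential helper prints for "get", which authFromHelper above parses. The
// payload is made up; a real helper named "foo" would be invoked as
// "docker-credential-foo get" with the registry URL written to its stdin.
func helperOutputExample() (*docker.AuthConfiguration, error) {
	payload := []byte(`{"Username":"someuser","Secret":"sometoken"}`)
	var response map[string]string
	if err := json.Unmarshal(payload, &response); err != nil {
		return nil, err
	}
	return &docker.AuthConfiguration{
		Username: response["Username"],
		Password: response["Secret"],
	}, nil
}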
  1978  
  1979  // authIsEmpty returns true if auth is nil or has no fields set
  1980  func authIsEmpty(auth *docker.AuthConfiguration) bool {
  1981  	if auth == nil {
  1982  		return true
  1983  	}
  1984  	return auth.Username == "" &&
  1985  		auth.Password == "" &&
  1986  		auth.Email == "" &&
  1987  		auth.ServerAddress == ""
  1988  }