github.com/quite/nomad@v0.8.6/client/driver/docker.go

     1  package driver
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net"
     9  	"os"
    10  	"os/exec"
    11  	"path/filepath"
    12  	"runtime"
    13  	"strconv"
    14  	"strings"
    15  	"sync"
    16  	"syscall"
    17  	"time"
    18  
    19  	"github.com/armon/circbuf"
    20  	"github.com/fsouza/go-dockerclient"
    21  
    22  	"github.com/docker/docker/cli/config/configfile"
    23  	"github.com/docker/docker/reference"
    24  	"github.com/docker/docker/registry"
    25  
    26  	"github.com/armon/go-metrics"
    27  	"github.com/hashicorp/go-multierror"
    28  	"github.com/hashicorp/go-plugin"
    29  	"github.com/hashicorp/nomad/client/allocdir"
    30  	"github.com/hashicorp/nomad/client/driver/env"
    31  	"github.com/hashicorp/nomad/client/driver/executor"
    32  	dstructs "github.com/hashicorp/nomad/client/driver/structs"
    33  	cstructs "github.com/hashicorp/nomad/client/structs"
    34  	"github.com/hashicorp/nomad/helper"
    35  	"github.com/hashicorp/nomad/helper/fields"
    36  	shelpers "github.com/hashicorp/nomad/helper/stats"
    37  	"github.com/hashicorp/nomad/nomad/structs"
    38  	"github.com/mitchellh/mapstructure"
    39  )
    40  
    41  var (
    42  	// createClientsLock is a lock that protects reading/writing global client
    43  	// variables
    44  	createClientsLock sync.Mutex
    45  
    46  	// client is a docker client with a timeout of 5 minutes. This is used
    47  	// for all operations with the docker daemon that are not long running,
    48  	// such as creating and killing containers.
    49  	client *docker.Client
    50  
    51  	// waitClient is a docker client with no timeouts. This is used for long
    52  	// running operations such as waiting on containers and collecting stats
    53  	waitClient *docker.Client
    54  
    55  	// healthCheckClient is a docker client with a timeout of 1 minute. This is
    56  	// necessary to have a shorter timeout than other API or fingerprint calls
    57  	healthCheckClient *docker.Client
    58  
    59  	// The statistics the Docker driver exposes
    60  	DockerMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage"}
    61  	DockerMeasuredCpuStats = []string{"Throttled Periods", "Throttled Time", "Percent"}
    62  
    63  	// recoverableErrTimeouts returns a recoverable error if the error was due
    64  	// to timeouts
    65  	recoverableErrTimeouts = func(err error) error {
    66  		r := false
    67  		if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") ||
    68  			strings.Contains(err.Error(), "EOF") {
    69  			r = true
    70  		}
    71  		return structs.NewRecoverableError(err, r)
    72  	}
    73  )
    74  
    75  const (
    76  	// NoSuchContainerError is returned by the docker daemon if the container
    77  	// does not exist.
    78  	NoSuchContainerError = "No such container"
    79  
    80  	// The key populated in Node Attributes to indicate presence of the Docker
    81  	// driver
    82  	dockerDriverAttr = "driver.docker"
    83  
    84  	// dockerSELinuxLabelConfigOption is the key for configuring the
    85  	// SELinux label for binds.
    86  	dockerSELinuxLabelConfigOption = "docker.volumes.selinuxlabel"
    87  
    88  	// dockerVolumesConfigOption is the key for enabling the use of custom
    89  	// bind volumes to arbitrary host paths.
    90  	dockerVolumesConfigOption  = "docker.volumes.enabled"
    91  	dockerVolumesConfigDefault = true
    92  
    93  	// dockerPrivilegedConfigOption is the key for running containers in
    94  	// Docker's privileged mode.
    95  	dockerPrivilegedConfigOption = "docker.privileged.enabled"
    96  
    97  	// dockerCleanupImageConfigOption is the key for whether or not to
    98  	// cleanup images after the task exits.
    99  	dockerCleanupImageConfigOption  = "docker.cleanup.image"
   100  	dockerCleanupImageConfigDefault = true
   101  
   102  	// dockerImageRemoveDelayConfigOption is the key for setting the delay
   103  	// before an unused docker image is removed
   104  	dockerImageRemoveDelayConfigOption  = "docker.cleanup.image.delay"
   105  	dockerImageRemoveDelayConfigDefault = 3 * time.Minute
   106  
   107  	// dockerCapsWhitelistConfigOption is the key for setting the list of
   108  	// allowed Linux capabilities
   109  	dockerCapsWhitelistConfigOption  = "docker.caps.whitelist"
   110  	dockerCapsWhitelistConfigDefault = dockerBasicCaps
   111  
   112  	// dockerTimeout is the length of time a request can be outstanding before
   113  	// it is timed out.
   114  	dockerTimeout = 5 * time.Minute
   115  
   116  	// dockerHealthCheckTimeout is the length of time a request for a health
   117  	// check client can be outstanding before it is timed out.
   118  	dockerHealthCheckTimeout = 1 * time.Minute
   119  
   120  	// dockerImageResKey is the CreatedResources key for docker images
   121  	dockerImageResKey = "image"
   122  
   123  	// dockerAuthHelperPrefix is the prefix to attach to the credential helper
   124  	// and should be found in the $PATH. Example: ${prefix-}${helper-name}
   125  	dockerAuthHelperPrefix = "docker-credential-"
   126  
   127  	// dockerBasicCaps is a comma-separated list of Linux capabilities that are
   128  	// allowed by docker by default, as documented in
   129  	// https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
   130  	dockerBasicCaps = "CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID," +
   131  		"SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE"
   132  
   133  	// This is cpu.cfs_period_us: the length of a period.
   134  	// The default value is 100 milliseconds (ms) represented in microseconds (us).
   135  	// Below is the documentation:
   136  	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
   137  	// https://docs.docker.com/engine/api/v1.35/#
   138  	defaultCFSPeriodUS = 100000
   139  
   140  	// dockerCleanupContainerConfigOption is the key for whether or not to
   141  	// remove containers after the task exits.
   142  	dockerCleanupContainerConfigOption  = "docker.cleanup.container"
   143  	dockerCleanupContainerConfigDefault = true
   144  )
   145  
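        // DockerDriver is the Driver implementation for running tasks in Docker
        // containers.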
   146  type DockerDriver struct {
   147  	DriverContext
   148  
   149  	driverConfig *DockerDriverConfig
   150  	imageID      string
   151  
   152  	// A tri-state boolean to know if the fingerprinting has happened and
   153  	// whether it has been successful
   154  	fingerprintSuccess *bool
   155  }
   156  
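        // DockerDriverAuth holds registry authentication credentials from the
        // task's auth block.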
   157  type DockerDriverAuth struct {
   158  	Username      string `mapstructure:"username"`       // username for the registry
   159  	Password      string `mapstructure:"password"`       // password to access the registry
   160  	Email         string `mapstructure:"email"`          // email address of the user who is allowed to access the registry
   161  	ServerAddress string `mapstructure:"server_address"` // server address of the registry
   162  }
   163  
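        // DockerLoggingOpts configures the Docker logging driver used for the
        // container.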
   164  type DockerLoggingOpts struct {
   165  	Type      string              `mapstructure:"type"`
   166  	ConfigRaw []map[string]string `mapstructure:"config"`
   167  	Config    map[string]string   `mapstructure:"-"`
   168  }
   169  
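        // DockerMount describes a volume mount from the task's mounts stanza.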
   170  type DockerMount struct {
   171  	Target        string                 `mapstructure:"target"`
   172  	Source        string                 `mapstructure:"source"`
   173  	ReadOnly      bool                   `mapstructure:"readonly"`
   174  	VolumeOptions []*DockerVolumeOptions `mapstructure:"volume_options"`
   175  }
   176  
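        // DockerDevice describes a host device to expose inside the container.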
   177  type DockerDevice struct {
   178  	HostPath          string `mapstructure:"host_path"`
   179  	ContainerPath     string `mapstructure:"container_path"`
   180  	CgroupPermissions string `mapstructure:"cgroup_permissions"`
   181  }
   182  
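        // DockerVolumeOptions holds options for a mounted volume, including
        // labels and volume driver configuration.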
   183  type DockerVolumeOptions struct {
   184  	NoCopy       bool                       `mapstructure:"no_copy"`
   185  	Labels       []map[string]string        `mapstructure:"labels"`
   186  	DriverConfig []DockerVolumeDriverConfig `mapstructure:"driver_config"`
   187  }
   188  
   189  // DockerVolumeDriverConfig holds a map of volume driver specific options
   190  type DockerVolumeDriverConfig struct {
   191  	Name    string              `mapstructure:"name"`
   192  	Options []map[string]string `mapstructure:"options"`
   193  }
   194  
   195  // DockerDriverConfig defines the user specified config block in a jobspec
   196  type DockerDriverConfig struct {
   197  	ImageName            string              `mapstructure:"image"`                  // Container's Image Name
   198  	LoadImage            string              `mapstructure:"load"`                   // LoadImage is a path to an image archive file
   199  	Command              string              `mapstructure:"command"`                // The Command to run when the container starts up
   200  	Args                 []string            `mapstructure:"args"`                   // The arguments to the Command
   201  	Entrypoint           []string            `mapstructure:"entrypoint"`             // Override the containers entrypoint
   202  	IpcMode              string              `mapstructure:"ipc_mode"`               // The IPC mode of the container - host and none
   203  	NetworkMode          string              `mapstructure:"network_mode"`           // The network mode of the container - host, nat and none
   204  	NetworkAliases       []string            `mapstructure:"network_aliases"`        // The network-scoped alias for the container
   205  	IPv4Address          string              `mapstructure:"ipv4_address"`           // The container ipv4 address
   206  	IPv6Address          string              `mapstructure:"ipv6_address"`           // the container ipv6 address
   207  	PidMode              string              `mapstructure:"pid_mode"`               // The PID mode of the container - host and none
   208  	UTSMode              string              `mapstructure:"uts_mode"`               // The UTS mode of the container - host and none
   209  	UsernsMode           string              `mapstructure:"userns_mode"`            // The User namespace mode of the container - host and none
   210  	PortMapRaw           []map[string]string `mapstructure:"port_map"`               //
   211  	PortMap              map[string]int      `mapstructure:"-"`                      // A map of host port labels and the ports exposed on the container
   212  	Privileged           bool                `mapstructure:"privileged"`             // Flag to run the container in privileged mode
   213  	SysctlRaw            []map[string]string `mapstructure:"sysctl"`                 //
   214  	Sysctl               map[string]string   `mapstructure:"-"`                      // The sysctl custom configurations
   215  	UlimitRaw            []map[string]string `mapstructure:"ulimit"`                 //
   216  	Ulimit               []docker.ULimit     `mapstructure:"-"`                      // The ulimit custom configurations
   217  	DNSServers           []string            `mapstructure:"dns_servers"`            // DNS Server for containers
   218  	DNSSearchDomains     []string            `mapstructure:"dns_search_domains"`     // DNS Search domains for containers
   219  	DNSOptions           []string            `mapstructure:"dns_options"`            // DNS Options
   220  	ExtraHosts           []string            `mapstructure:"extra_hosts"`            // Add host to /etc/hosts (host:IP)
   221  	Hostname             string              `mapstructure:"hostname"`               // Hostname for containers
   222  	LabelsRaw            []map[string]string `mapstructure:"labels"`                 //
   223  	Labels               map[string]string   `mapstructure:"-"`                      // Labels to set when the container starts up
   224  	Auth                 []DockerDriverAuth  `mapstructure:"auth"`                   // Authentication credentials for a private Docker registry
   225  	AuthSoftFail         bool                `mapstructure:"auth_soft_fail"`         // Soft-fail if auth creds are provided but fail
   226  	TTY                  bool                `mapstructure:"tty"`                    // Allocate a Pseudo-TTY
   227  	Interactive          bool                `mapstructure:"interactive"`            // Keep STDIN open even if not attached
   228  	ShmSize              int64               `mapstructure:"shm_size"`               // Size of /dev/shm of the container in bytes
   229  	WorkDir              string              `mapstructure:"work_dir"`               // Working directory inside the container
   230  	Logging              []DockerLoggingOpts `mapstructure:"logging"`                // Logging options for syslog server
   231  	Volumes              []string            `mapstructure:"volumes"`                // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
   232  	Mounts               []DockerMount       `mapstructure:"mounts"`                 // Docker volumes to mount
   233  	VolumeDriver         string              `mapstructure:"volume_driver"`          // Docker volume driver used for the container's volumes
   234  	ForcePull            bool                `mapstructure:"force_pull"`             // Always force pull before running image, useful if your tags are mutable
   235  	MacAddress           string              `mapstructure:"mac_address"`            // Pin mac address to container
   236  	SecurityOpt          []string            `mapstructure:"security_opt"`           // Flags to pass directly to security-opt
   237  	Devices              []DockerDevice      `mapstructure:"devices"`                // To allow mounting USB or other serial control devices
   238  	CapAdd               []string            `mapstructure:"cap_add"`                // Flags to pass directly to cap-add
   239  	CapDrop              []string            `mapstructure:"cap_drop"`               // Flags to pass directly to cap-drop
   240  	ReadonlyRootfs       bool                `mapstructure:"readonly_rootfs"`        // Mount the container’s root filesystem as read only
   241  	AdvertiseIPv6Address bool                `mapstructure:"advertise_ipv6_address"` // Flag to use the GlobalIPv6Address from the container as the detected IP
   242  	CPUHardLimit         bool                `mapstructure:"cpu_hard_limit"`         // Enforce CPU hard limit.
   243  	PidsLimit            int64               `mapstructure:"pids_limit"`             // Enforce Docker Pids limit
   244  }
   245  
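        // sliceMergeUlimit converts a map of ulimit names to "soft[:hard]" strings
        // into docker.ULimit values; when the hard limit is omitted it defaults to
        // the soft limit.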
   246  func sliceMergeUlimit(ulimitsRaw map[string]string) ([]docker.ULimit, error) {
   247  	var ulimits []docker.ULimit
   248  
   249  	for name, ulimitRaw := range ulimitsRaw {
   250  		if len(ulimitRaw) == 0 {
   251  			return []docker.ULimit{}, fmt.Errorf("Malformed ulimit specification %v: %q, cannot be empty", name, ulimitRaw)
   252  		}
   253  		// hard limit is optional
   254  		if strings.Contains(ulimitRaw, ":") == false {
   255  			ulimitRaw = ulimitRaw + ":" + ulimitRaw
   256  		}
   257  
   258  		splitted := strings.SplitN(ulimitRaw, ":", 2)
   259  		if len(splitted) < 2 {
   260  			return []docker.ULimit{}, fmt.Errorf("Malformed ulimit specification %v: %v", name, ulimitRaw)
   261  		}
   262  		soft, err := strconv.Atoi(splitted[0])
   263  		if err != nil {
   264  			return []docker.ULimit{}, fmt.Errorf("Malformed soft ulimit %v: %v", name, ulimitRaw)
   265  		}
   266  		hard, err := strconv.Atoi(splitted[1])
   267  		if err != nil {
   268  			return []docker.ULimit{}, fmt.Errorf("Malformed hard ulimit %v: %v", name, ulimitRaw)
   269  		}
   270  
   271  		ulimit := docker.ULimit{
   272  			Name: name,
   273  			Soft: int64(soft),
   274  			Hard: int64(hard),
   275  		}
   276  		ulimits = append(ulimits, ulimit)
   277  	}
   278  	return ulimits, nil
   279  }
   280  
   281  // Validate validates a docker driver config
   282  func (c *DockerDriverConfig) Validate() error {
   283  	if c.ImageName == "" {
   284  		return fmt.Errorf("Docker Driver needs an image name")
   285  	}
   286  	if len(c.Devices) > 0 {
   287  		for _, dev := range c.Devices {
   288  			if dev.HostPath == "" {
   289  				return fmt.Errorf("host path must be set in configuration for devices")
   290  			}
   291  			if dev.CgroupPermissions != "" {
   292  				for _, c := range dev.CgroupPermissions {
   293  					ch := string(c)
   294  					if ch != "r" && ch != "w" && ch != "m" {
   295  						return fmt.Errorf("invalid cgroup permission string: %q", dev.CgroupPermissions)
   296  					}
   297  				}
   298  			}
   299  		}
   300  	}
   301  	c.Sysctl = mapMergeStrStr(c.SysctlRaw...)
   302  	c.Labels = mapMergeStrStr(c.LabelsRaw...)
   303  	if len(c.Logging) > 0 {
   304  		c.Logging[0].Config = mapMergeStrStr(c.Logging[0].ConfigRaw...)
   305  	}
   306  
   307  	mergedUlimitsRaw := mapMergeStrStr(c.UlimitRaw...)
   308  	ulimit, err := sliceMergeUlimit(mergedUlimitsRaw)
   309  	if err != nil {
   310  		return err
   311  	}
   312  	c.Ulimit = ulimit
   313  	return nil
   314  }
   315  
   316  // NewDockerDriverConfig returns a docker driver config by parsing the HCL
   317  // config
   318  func NewDockerDriverConfig(task *structs.Task, env *env.TaskEnv) (*DockerDriverConfig, error) {
   319  	var dconf DockerDriverConfig
   320  
   321  	if err := mapstructure.WeakDecode(task.Config, &dconf); err != nil {
   322  		return nil, err
   323  	}
   324  
   325  	// Interpolate everything that is a string
   326  	dconf.ImageName = env.ReplaceEnv(dconf.ImageName)
   327  	dconf.Command = env.ReplaceEnv(dconf.Command)
   328  	dconf.Entrypoint = env.ParseAndReplace(dconf.Entrypoint)
   329  	dconf.IpcMode = env.ReplaceEnv(dconf.IpcMode)
   330  	dconf.NetworkMode = env.ReplaceEnv(dconf.NetworkMode)
   331  	dconf.NetworkAliases = env.ParseAndReplace(dconf.NetworkAliases)
   332  	dconf.IPv4Address = env.ReplaceEnv(dconf.IPv4Address)
   333  	dconf.IPv6Address = env.ReplaceEnv(dconf.IPv6Address)
   334  	dconf.PidMode = env.ReplaceEnv(dconf.PidMode)
   335  	dconf.UTSMode = env.ReplaceEnv(dconf.UTSMode)
   336  	dconf.Hostname = env.ReplaceEnv(dconf.Hostname)
   337  	dconf.WorkDir = env.ReplaceEnv(dconf.WorkDir)
   338  	dconf.LoadImage = env.ReplaceEnv(dconf.LoadImage)
   339  	dconf.Volumes = env.ParseAndReplace(dconf.Volumes)
   340  	dconf.VolumeDriver = env.ReplaceEnv(dconf.VolumeDriver)
   341  	dconf.DNSServers = env.ParseAndReplace(dconf.DNSServers)
   342  	dconf.DNSSearchDomains = env.ParseAndReplace(dconf.DNSSearchDomains)
   343  	dconf.DNSOptions = env.ParseAndReplace(dconf.DNSOptions)
   344  	dconf.ExtraHosts = env.ParseAndReplace(dconf.ExtraHosts)
   345  	dconf.MacAddress = env.ReplaceEnv(dconf.MacAddress)
   346  	dconf.SecurityOpt = env.ParseAndReplace(dconf.SecurityOpt)
   347  	dconf.CapAdd = env.ParseAndReplace(dconf.CapAdd)
   348  	dconf.CapDrop = env.ParseAndReplace(dconf.CapDrop)
   349  
   350  	for _, m := range dconf.SysctlRaw {
   351  		for k, v := range m {
   352  			delete(m, k)
   353  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   354  		}
   355  	}
   356  
   357  	for _, m := range dconf.UlimitRaw {
   358  		for k, v := range m {
   359  			delete(m, k)
   360  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   361  		}
   362  	}
   363  
   364  	for _, m := range dconf.LabelsRaw {
   365  		for k, v := range m {
   366  			delete(m, k)
   367  			m[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   368  		}
   369  	}
   370  	dconf.Labels = mapMergeStrStr(dconf.LabelsRaw...)
   371  
   372  	for i, a := range dconf.Auth {
   373  		dconf.Auth[i].Username = env.ReplaceEnv(a.Username)
   374  		dconf.Auth[i].Password = env.ReplaceEnv(a.Password)
   375  		dconf.Auth[i].Email = env.ReplaceEnv(a.Email)
   376  		dconf.Auth[i].ServerAddress = env.ReplaceEnv(a.ServerAddress)
   377  	}
   378  
   379  	for i, l := range dconf.Logging {
   380  		dconf.Logging[i].Type = env.ReplaceEnv(l.Type)
   381  		for _, c := range l.ConfigRaw {
   382  			for k, v := range c {
   383  				delete(c, k)
   384  				c[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   385  			}
   386  		}
   387  	}
   388  
   389  	for i, m := range dconf.Mounts {
   390  		dconf.Mounts[i].Target = env.ReplaceEnv(m.Target)
   391  		dconf.Mounts[i].Source = env.ReplaceEnv(m.Source)
   392  
   393  		if len(m.VolumeOptions) > 1 {
   394  			return nil, fmt.Errorf("Only one volume_options stanza allowed")
   395  		}
   396  
   397  		if len(m.VolumeOptions) == 1 {
   398  			vo := m.VolumeOptions[0]
   399  			if len(vo.Labels) > 1 {
   400  				return nil, fmt.Errorf("labels may only be specified once in volume_options stanza")
   401  			}
   402  
   403  			if len(vo.Labels) == 1 {
   404  				for k, v := range vo.Labels[0] {
   405  					if k != env.ReplaceEnv(k) {
   406  						delete(vo.Labels[0], k)
   407  					}
   408  					vo.Labels[0][env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   409  				}
   410  			}
   411  
   412  			if len(vo.DriverConfig) > 1 {
   413  				return nil, fmt.Errorf("volume driver config may only be specified once")
   414  			}
   415  			if len(vo.DriverConfig) == 1 {
   416  				vo.DriverConfig[0].Name = env.ReplaceEnv(vo.DriverConfig[0].Name)
   417  				if len(vo.DriverConfig[0].Options) > 1 {
   418  					return nil, fmt.Errorf("volume driver options may only be specified once")
   419  				}
   420  
   421  				if len(vo.DriverConfig[0].Options) == 1 {
   422  					options := vo.DriverConfig[0].Options[0]
   423  					for k, v := range options {
   424  						if k != env.ReplaceEnv(k) {
   425  							delete(options, k)
   426  						}
   427  						options[env.ReplaceEnv(k)] = env.ReplaceEnv(v)
   428  					}
   429  				}
   430  			}
   431  		}
   432  	}
   433  
   434  	if len(dconf.Logging) > 0 {
   435  		dconf.Logging[0].Config = mapMergeStrStr(dconf.Logging[0].ConfigRaw...)
   436  	}
   437  
   438  	portMap := make(map[string]int)
   439  	for _, m := range dconf.PortMapRaw {
   440  		for k, v := range m {
   441  			ki, vi := env.ReplaceEnv(k), env.ReplaceEnv(v)
   442  			p, err := strconv.Atoi(vi)
   443  			if err != nil {
   444  				return nil, fmt.Errorf("failed to parse port map value %v to %v: %v", ki, vi, err)
   445  			}
   446  			portMap[ki] = p
   447  		}
   448  	}
   449  	dconf.PortMap = portMap
   450  
   451  	// Remove any https:// prefix from the image name
   452  	if strings.Contains(dconf.ImageName, "https://") {
   453  		dconf.ImageName = strings.Replace(dconf.ImageName, "https://", "", 1)
   454  	}
   455  
   456  	// If devices are configured set default cgroup permissions
   457  	if len(dconf.Devices) > 0 {
   458  		for i, dev := range dconf.Devices {
   459  			if dev.CgroupPermissions == "" {
   460  				dev.CgroupPermissions = "rwm"
   461  			}
   462  			dconf.Devices[i] = dev
   463  		}
   464  	}
   465  
   466  	if err := dconf.Validate(); err != nil {
   467  		return nil, err
   468  	}
   469  	return &dconf, nil
   470  }
   471  
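        // dockerPID is the state recorded for a running task so the handle can be
        // reattached to the container and executor plugin later.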
   472  type dockerPID struct {
   473  	Version        string
   474  	Image          string
   475  	ImageID        string
   476  	ContainerID    string
   477  	KillTimeout    time.Duration
   478  	MaxKillTimeout time.Duration
   479  	PluginConfig   *PluginReattachConfig
   480  }
   481  
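        // DockerHandle is the handle for a running Docker task. It wraps the
        // container, the executor plugin, and the most recently collected
        // resource usage.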
   482  type DockerHandle struct {
   483  	pluginClient          *plugin.Client
   484  	executor              executor.Executor
   485  	client                *docker.Client
   486  	waitClient            *docker.Client
   487  	logger                *log.Logger
   488  	jobName               string
   489  	taskGroupName         string
   490  	taskName              string
   491  	Image                 string
   492  	ImageID               string
   493  	containerID           string
   494  	version               string
   495  	killTimeout           time.Duration
   496  	maxKillTimeout        time.Duration
   497  	resourceUsageLock     sync.RWMutex
   498  	resourceUsage         *cstructs.TaskResourceUsage
   499  	waitCh                chan *dstructs.WaitResult
   500  	doneCh                chan bool
   501  	removeContainerOnExit bool
   502  }
   503  
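        // NewDockerDriver returns a Docker implementation of the Driver interface.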
   504  func NewDockerDriver(ctx *DriverContext) Driver {
   505  	return &DockerDriver{DriverContext: *ctx}
   506  }
   507  
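        // Fingerprint probes for a reachable Docker daemon and, when one responds,
        // advertises node attributes such as the Docker version, privileged and
        // volume support, and the bridge network IP.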
   508  func (d *DockerDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
   509  	client, _, err := d.dockerClients()
   510  	if err != nil {
   511  		if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
   512  			d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s", err)
   513  		}
   514  		d.fingerprintSuccess = helper.BoolToPtr(false)
   515  		return nil
   516  	}
   517  
   518  	// This is the first operation taken on the client so we'll try to
   519  	// establish a connection to the Docker daemon. If this fails it means
   520  	// Docker isn't available so we'll simply disable the docker driver.
   521  	env, err := client.Version()
   522  	if err != nil {
   523  		if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
   524  			d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon at %s: %s", client.Endpoint(), err)
   525  		}
   526  		d.fingerprintSuccess = helper.BoolToPtr(false)
   527  		resp.RemoveAttribute(dockerDriverAttr)
   528  		return nil
   529  	}
   530  
   531  	resp.AddAttribute(dockerDriverAttr, "1")
   532  	resp.AddAttribute("driver.docker.version", env.Get("Version"))
   533  	resp.Detected = true
   534  
   535  	privileged := d.config.ReadBoolDefault(dockerPrivilegedConfigOption, false)
   536  	if privileged {
   537  		resp.AddAttribute(dockerPrivilegedConfigOption, "1")
   538  	}
   539  
   540  	// Advertise if this node supports Docker volumes
   541  	if d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault) {
   542  		resp.AddAttribute("driver."+dockerVolumesConfigOption, "1")
   543  	}
   544  
   545  	// Detect bridge IP address - #2785
   546  	if nets, err := client.ListNetworks(); err != nil {
   547  		d.logger.Printf("[WARN] driver.docker: error discovering bridge IP: %v", err)
   548  	} else {
   549  		for _, n := range nets {
   550  			if n.Name != "bridge" {
   551  				continue
   552  			}
   553  
   554  			if len(n.IPAM.Config) == 0 {
   555  				d.logger.Printf("[WARN] driver.docker: no IPAM config for bridge network")
   556  				break
   557  			}
   558  
   559  			if n.IPAM.Config[0].Gateway != "" {
   560  				resp.AddAttribute("driver.docker.bridge_ip", n.IPAM.Config[0].Gateway)
   561  			} else if d.fingerprintSuccess == nil {
   562  				// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
   563  				// See https://github.com/moby/moby/issues/32648
   564  				d.logger.Printf("[DEBUG] driver.docker: bridge_ip could not be discovered")
   565  			}
   566  			break
   567  		}
   568  	}
   569  
   570  	d.fingerprintSuccess = helper.BoolToPtr(true)
   571  	return nil
   572  }
   573  
   574  // HealthCheck implements the HealthCheck interface. It performs a health check
   575  // on the docker driver, asserting whether the docker driver is responsive to a
   576  // `docker ps` command.
   577  func (d *DockerDriver) HealthCheck(req *cstructs.HealthCheckRequest, resp *cstructs.HealthCheckResponse) error {
   578  	dinfo := &structs.DriverInfo{
   579  		UpdateTime: time.Now(),
   580  	}
   581  
   582  	healthCheckClient, err := d.dockerHealthCheckClient()
   583  	if err != nil {
   584  		d.logger.Printf("[WARN] driver.docker: failed to retrieve Docker client in the process of a docker health check: %v", err)
   585  		dinfo.HealthDescription = fmt.Sprintf("Failed retrieving Docker client: %v", err)
   586  		resp.AddDriverInfo("docker", dinfo)
   587  		return nil
   588  	}
   589  
   590  	_, err = healthCheckClient.ListContainers(docker.ListContainersOptions{All: false})
   591  	if err != nil {
   592  		d.logger.Printf("[WARN] driver.docker: failed to list Docker containers in the process of a Docker health check: %v", err)
   593  		dinfo.HealthDescription = fmt.Sprintf("Failed to list Docker containers: %v", err)
   594  		resp.AddDriverInfo("docker", dinfo)
   595  		return nil
   596  	}
   597  
   598  	d.logger.Printf("[TRACE] driver.docker: docker driver is available and is responsive to `docker ps`")
   599  	dinfo.Healthy = true
   600  	dinfo.HealthDescription = "Driver is available and responsive"
   601  	resp.AddDriverInfo("docker", dinfo)
   602  	return nil
   603  }
   604  
   605  // GetHealthCheckInterval implements the HealthCheck interface. It sets whether
   606  // the driver is eligible for periodic health checks and the interval at which
   607  // to do them.
   608  func (d *DockerDriver) GetHealthCheckInterval(req *cstructs.HealthCheckIntervalRequest, resp *cstructs.HealthCheckIntervalResponse) error {
   609  	resp.Eligible = true
   610  	resp.Period = 1 * time.Minute
   611  	return nil
   612  }
   613  
   614  // Validate is used to validate the driver configuration
   615  func (d *DockerDriver) Validate(config map[string]interface{}) error {
   616  	fd := &fields.FieldData{
   617  		Raw: config,
   618  		Schema: map[string]*fields.FieldSchema{
   619  			"image": {
   620  				Type:     fields.TypeString,
   621  				Required: true,
   622  			},
   623  			"load": {
   624  				Type: fields.TypeString,
   625  			},
   626  			"command": {
   627  				Type: fields.TypeString,
   628  			},
   629  			"args": {
   630  				Type: fields.TypeArray,
   631  			},
   632  			"entrypoint": {
   633  				Type: fields.TypeArray,
   634  			},
   635  			"ipc_mode": {
   636  				Type: fields.TypeString,
   637  			},
   638  			"network_mode": {
   639  				Type: fields.TypeString,
   640  			},
   641  			"network_aliases": {
   642  				Type: fields.TypeArray,
   643  			},
   644  			"ipv4_address": {
   645  				Type: fields.TypeString,
   646  			},
   647  			"ipv6_address": {
   648  				Type: fields.TypeString,
   649  			},
   650  			"mac_address": {
   651  				Type: fields.TypeString,
   652  			},
   653  			"pid_mode": {
   654  				Type: fields.TypeString,
   655  			},
   656  			"uts_mode": {
   657  				Type: fields.TypeString,
   658  			},
   659  			"userns_mode": {
   660  				Type: fields.TypeString,
   661  			},
   662  			"sysctl": {
   663  				Type: fields.TypeArray,
   664  			},
   665  			"ulimit": {
   666  				Type: fields.TypeArray,
   667  			},
   668  			"port_map": {
   669  				Type: fields.TypeArray,
   670  			},
   671  			"privileged": {
   672  				Type: fields.TypeBool,
   673  			},
   674  			"dns_servers": {
   675  				Type: fields.TypeArray,
   676  			},
   677  			"dns_options": {
   678  				Type: fields.TypeArray,
   679  			},
   680  			"dns_search_domains": {
   681  				Type: fields.TypeArray,
   682  			},
   683  			"extra_hosts": {
   684  				Type: fields.TypeArray,
   685  			},
   686  			"hostname": {
   687  				Type: fields.TypeString,
   688  			},
   689  			"labels": {
   690  				Type: fields.TypeArray,
   691  			},
   692  			"auth": {
   693  				Type: fields.TypeArray,
   694  			},
   695  			"auth_soft_fail": {
   696  				Type: fields.TypeBool,
   697  			},
   698  			// COMPAT: Remove in 0.6.0. SSL is no longer needed
   699  			"ssl": {
   700  				Type: fields.TypeBool,
   701  			},
   702  			"tty": {
   703  				Type: fields.TypeBool,
   704  			},
   705  			"interactive": {
   706  				Type: fields.TypeBool,
   707  			},
   708  			"shm_size": {
   709  				Type: fields.TypeInt,
   710  			},
   711  			"work_dir": {
   712  				Type: fields.TypeString,
   713  			},
   714  			"logging": {
   715  				Type: fields.TypeArray,
   716  			},
   717  			"volumes": {
   718  				Type: fields.TypeArray,
   719  			},
   720  			"volume_driver": {
   721  				Type: fields.TypeString,
   722  			},
   723  			"mounts": {
   724  				Type: fields.TypeArray,
   725  			},
   726  			"force_pull": {
   727  				Type: fields.TypeBool,
   728  			},
   729  			"security_opt": {
   730  				Type: fields.TypeArray,
   731  			},
   732  			"devices": {
   733  				Type: fields.TypeArray,
   734  			},
   735  			"cap_add": {
   736  				Type: fields.TypeArray,
   737  			},
   738  			"cap_drop": {
   739  				Type: fields.TypeArray,
   740  			},
   741  			"readonly_rootfs": {
   742  				Type: fields.TypeBool,
   743  			},
   744  			"advertise_ipv6_address": {
   745  				Type: fields.TypeBool,
   746  			},
   747  			"cpu_hard_limit": {
   748  				Type: fields.TypeBool,
   749  			},
   750  			"pids_limit": {
   751  				Type: fields.TypeInt,
   752  			},
   753  		},
   754  	}
   755  
   756  	if err := fd.Validate(); err != nil {
   757  		return err
   758  	}
   759  
   760  	return nil
   761  }
   762  
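        // Abilities returns the driver's capabilities: sending signals and exec
        // are both supported.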
   763  func (d *DockerDriver) Abilities() DriverAbilities {
   764  	return DriverAbilities{
   765  		SendSignals: true,
   766  		Exec:        true,
   767  	}
   768  }
   769  
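        // FSIsolation indicates that the driver provides image-based filesystem
        // isolation.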
   770  func (d *DockerDriver) FSIsolation() cstructs.FSIsolation {
   771  	return cstructs.FSIsolationImage
   772  }
   773  
   774  // getDockerCoordinator returns the docker coordinator and the caller ID to use when
   775  // interacting with the coordinator
   776  func (d *DockerDriver) getDockerCoordinator(client *docker.Client) (*dockerCoordinator, string) {
   777  	config := &dockerCoordinatorConfig{
   778  		client:      client,
   779  		cleanup:     d.config.ReadBoolDefault(dockerCleanupImageConfigOption, dockerCleanupImageConfigDefault),
   780  		logger:      d.logger,
   781  		removeDelay: d.config.ReadDurationDefault(dockerImageRemoveDelayConfigOption, dockerImageRemoveDelayConfigDefault),
   782  	}
   783  
   784  	return GetDockerCoordinator(config), fmt.Sprintf("%s-%s", d.DriverContext.allocID, d.DriverContext.taskName)
   785  }
   786  
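        // Prestart parses the task's Docker configuration and ensures the image is
        // available locally before Start is called, recording it as a created
        // resource.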
   787  func (d *DockerDriver) Prestart(ctx *ExecContext, task *structs.Task) (*PrestartResponse, error) {
   788  	driverConfig, err := NewDockerDriverConfig(task, ctx.TaskEnv)
   789  	if err != nil {
   790  		return nil, err
   791  	}
   792  
   793  	// Set state needed by Start
   794  	d.driverConfig = driverConfig
   795  
   796  	// Initialize docker API clients
   797  	client, _, err := d.dockerClients()
   798  	if err != nil {
   799  		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
   800  	}
   801  
   802  	// Ensure the image is available
   803  	id, err := d.createImage(driverConfig, client, ctx.TaskDir)
   804  	if err != nil {
   805  		return nil, err
   806  	}
   807  	d.imageID = id
   808  
   809  	resp := NewPrestartResponse()
   810  	resp.CreatedResources.Add(dockerImageResKey, id)
   811  
   812  	// Return the PortMap if it's set
   813  	if len(driverConfig.PortMap) > 0 {
   814  		resp.Network = &cstructs.DriverNetwork{
   815  			PortMap: driverConfig.PortMap,
   816  		}
   817  	}
   818  	return resp, nil
   819  }
   820  
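        // Start launches the executor plugin, optionally starts a syslog collector,
        // creates and starts the container, and returns a handle along with the
        // container's network information.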
   821  func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
   822  	pluginLogFile := filepath.Join(ctx.TaskDir.Dir, "executor.out")
   823  	executorConfig := &dstructs.ExecutorConfig{
   824  		LogFile:  pluginLogFile,
   825  		LogLevel: d.config.LogLevel,
   826  	}
   827  
   828  	exec, pluginClient, err := createExecutor(d.config.LogOutput, d.config, executorConfig)
   829  	if err != nil {
   830  		return nil, err
   831  	}
   832  	executorCtx := &executor.ExecutorContext{
   833  		TaskEnv:        ctx.TaskEnv,
   834  		Task:           task,
   835  		Driver:         "docker",
   836  		LogDir:         ctx.TaskDir.LogDir,
   837  		TaskDir:        ctx.TaskDir.Dir,
   838  		PortLowerBound: d.config.ClientMinPort,
   839  		PortUpperBound: d.config.ClientMaxPort,
   840  	}
   841  	if err := exec.SetContext(executorCtx); err != nil {
   842  		pluginClient.Kill()
   843  		return nil, fmt.Errorf("failed to set executor context: %v", err)
   844  	}
   845  
   846  	// If the user hasn't specified any logging options, launch our own syslog
   847  	// server if possible.
   848  	syslogAddr := ""
   849  	if len(d.driverConfig.Logging) == 0 {
   850  		if runtime.GOOS == "darwin" {
   851  			d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
   852  		} else {
   853  			ss, err := exec.LaunchSyslogServer()
   854  			if err != nil {
   855  				pluginClient.Kill()
   856  				return nil, fmt.Errorf("failed to start syslog collector: %v", err)
   857  			}
   858  			syslogAddr = ss.Addr
   859  		}
   860  	}
   861  
   862  	config, err := d.createContainerConfig(ctx, task, d.driverConfig, syslogAddr)
   863  	if err != nil {
   864  		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %q (%q): %v", d.driverConfig.ImageName, d.imageID, err)
   865  		pluginClient.Kill()
   866  		return nil, fmt.Errorf("Failed to create container configuration for image %q (%q): %v", d.driverConfig.ImageName, d.imageID, err)
   867  	}
   868  
   869  	container, err := d.createContainer(client, config)
   870  	if err != nil {
   871  		wrapped := fmt.Sprintf("Failed to create container: %v", err)
   872  		d.logger.Printf("[ERR] driver.docker: %s", wrapped)
   873  		pluginClient.Kill()
   874  		return nil, structs.WrapRecoverable(wrapped, err)
   875  	}
   876  
   877  	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)
   878  
   879  	// We don't need to start the container if it is already running, since
   880  	// createContainer may have returned an existing container that is already
   881  	// present on the host and running rather than creating a new one.
   882  	if !container.State.Running {
   883  		// Start the container
   884  		if err := d.startContainer(container); err != nil {
   885  			d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
   886  			pluginClient.Kill()
   887  			return nil, structs.NewRecoverableError(fmt.Errorf("Failed to start container %s: %s", container.ID, err), structs.IsRecoverable(err))
   888  		}
   889  
   890  		// InspectContainer to get all of the container metadata as
   891  		// much of the metadata (eg networking) isn't populated until
   892  		// the container is started
   893  		runningContainer, err := client.InspectContainer(container.ID)
   894  		if err != nil {
   895  			err = fmt.Errorf("failed to inspect started container %s: %s", container.ID, err)
   896  			d.logger.Printf("[ERR] driver.docker: %v", err)
   897  			pluginClient.Kill()
   898  			return nil, structs.NewRecoverableError(err, true)
   899  		}
   900  		container = runningContainer
   901  		d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
   902  	} else {
   903  		d.logger.Printf("[DEBUG] driver.docker: re-attaching to container %s with status %q",
   904  			container.ID, container.State.String())
   905  	}
   906  
   907  	// Return a driver handle
   908  	maxKill := d.DriverContext.config.MaxKillTimeout
   909  	h := &DockerHandle{
   910  		client:                client,
   911  		waitClient:            waitClient,
   912  		executor:              exec,
   913  		pluginClient:          pluginClient,
   914  		logger:                d.logger,
   915  		jobName:               d.DriverContext.jobName,
   916  		taskGroupName:         d.DriverContext.taskGroupName,
   917  		taskName:              d.DriverContext.taskName,
   918  		Image:                 d.driverConfig.ImageName,
   919  		ImageID:               d.imageID,
   920  		containerID:           container.ID,
   921  		version:               d.config.Version.VersionNumber(),
   922  		killTimeout:           GetKillTimeout(task.KillTimeout, maxKill),
   923  		maxKillTimeout:        maxKill,
   924  		doneCh:                make(chan bool),
   925  		waitCh:                make(chan *dstructs.WaitResult, 1),
   926  		removeContainerOnExit: d.config.ReadBoolDefault(dockerCleanupContainerConfigOption, dockerCleanupContainerConfigDefault),
   927  	}
   928  	go h.collectStats()
   929  	go h.run()
   930  
   931  	// Detect container address
   932  	ip, autoUse := d.detectIP(container)
   933  
   934  	// Create a response with the driver handle and container network metadata
   935  	resp := &StartResponse{
   936  		Handle: h,
   937  		Network: &cstructs.DriverNetwork{
   938  			PortMap:       d.driverConfig.PortMap,
   939  			IP:            ip,
   940  			AutoAdvertise: autoUse,
   941  		},
   942  	}
   943  	return resp, nil
   944  }
   945  
   946  // detectIP of Docker container. Returns the first IP found as well as true if
   947  // the IP should be advertised (bridge network IPs return false). Returns an
   948  // empty string and false if no IP could be found.
   949  func (d *DockerDriver) detectIP(c *docker.Container) (string, bool) {
   950  	if c.NetworkSettings == nil {
   951  		// This should only happen if there's been a coding error (such
   952  		// as not calling InspectContainer after CreateContainer). Code
   953  		// defensively in case the Docker API changes subtly.
   954  		d.logger.Printf("[ERROR] driver.docker: no network settings for container %s", c.ID)
   955  		return "", false
   956  	}
   957  
   958  	ip, ipName := "", ""
   959  	auto := false
   960  	for name, net := range c.NetworkSettings.Networks {
   961  		if net.IPAddress == "" {
   962  			// Ignore networks without an IP address
   963  			continue
   964  		}
   965  
   966  		ip = net.IPAddress
   967  		if d.driverConfig.AdvertiseIPv6Address {
   968  			ip = net.GlobalIPv6Address
   969  			auto = true
   970  		}
   971  		ipName = name
   972  
   973  		// Don't auto-advertise IPs for default networks (bridge on
   974  		// Linux, nat on Windows)
   975  		if name != "bridge" && name != "nat" {
   976  			auto = true
   977  		}
   978  
   979  		break
   980  	}
   981  
   982  	if n := len(c.NetworkSettings.Networks); n > 1 {
   983  		d.logger.Printf("[WARN] driver.docker: task %s multiple (%d) Docker networks for container %q but Nomad only supports 1: choosing %q", d.taskName, n, c.ID, ipName)
   984  	}
   985  
   986  	return ip, auto
   987  }
   988  
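        // Cleanup removes resources created in Prestart, currently only downloaded
        // Docker images, and returns a recoverable error if any removal should be
        // retried.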
   989  func (d *DockerDriver) Cleanup(_ *ExecContext, res *CreatedResources) error {
   990  	retry := false
   991  	var merr multierror.Error
   992  	for key, resources := range res.Resources {
   993  		switch key {
   994  		case dockerImageResKey:
   995  			for _, value := range resources {
   996  				err := d.cleanupImage(value)
   997  				if err != nil {
   998  					if structs.IsRecoverable(err) {
   999  						retry = true
  1000  					}
  1001  					merr.Errors = append(merr.Errors, err)
  1002  					continue
  1003  				}
  1004  
  1005  				// Remove cleaned image from resources
  1006  				res.Remove(dockerImageResKey, value)
  1007  			}
  1008  		default:
  1009  			d.logger.Printf("[ERR] driver.docker: unknown resource to cleanup: %q", key)
  1010  		}
  1011  	}
  1012  	return structs.NewRecoverableError(merr.ErrorOrNil(), retry)
  1013  }
  1014  
  1015  // cleanupImage removes a Docker image. No error is returned if the image
  1016  // doesn't exist or is still in use. Requires the global client to already be
  1017  // initialized.
  1018  func (d *DockerDriver) cleanupImage(imageID string) error {
  1019  	if !d.config.ReadBoolDefault(dockerCleanupImageConfigOption, dockerCleanupImageConfigDefault) {
  1020  		// Config says not to cleanup
  1021  		return nil
  1022  	}
  1023  
  1024  	coordinator, callerID := d.getDockerCoordinator(client)
  1025  	coordinator.RemoveImage(imageID, callerID)
  1026  
  1027  	return nil
  1028  }
  1029  
  1030  // dockerHealthCheckClient creates a single *docker.Client with a timeout of
  1031  // one minute, which will be used when performing Docker health checks.
  1032  func (d *DockerDriver) dockerHealthCheckClient() (*docker.Client, error) {
  1033  	createClientsLock.Lock()
  1034  	defer createClientsLock.Unlock()
  1035  
  1036  	if healthCheckClient != nil {
  1037  		return healthCheckClient, nil
  1038  	}
  1039  
  1040  	var err error
  1041  	healthCheckClient, err = d.newDockerClient(dockerHealthCheckTimeout)
  1042  	if err != nil {
  1043  		return nil, err
  1044  	}
  1045  
  1046  	return healthCheckClient, nil
  1047  }
  1048  
  1049  // dockerClients creates two *docker.Client, one for long running operations and
  1050  // the other for shorter operations. In test / dev mode we can use ENV vars to
  1051  // connect to the docker daemon. In production mode we will read docker.endpoint
  1052  // from the config file.
  1053  func (d *DockerDriver) dockerClients() (*docker.Client, *docker.Client, error) {
  1054  	createClientsLock.Lock()
  1055  	defer createClientsLock.Unlock()
  1056  
  1057  	if client != nil && waitClient != nil {
  1058  		return client, waitClient, nil
  1059  	}
  1060  
  1061  	var err error
  1062  
  1063  	// Only initialize the client if it hasn't yet been done
  1064  	if client == nil {
  1065  		client, err = d.newDockerClient(dockerTimeout)
  1066  		if err != nil {
  1067  			return nil, nil, err
  1068  		}
  1069  	}
  1070  
  1071  	// Only initialize the waitClient if it hasn't yet been done
  1072  	if waitClient == nil {
  1073  		waitClient, err = d.newDockerClient(0 * time.Minute)
  1074  		if err != nil {
  1075  			return nil, nil, err
  1076  		}
  1077  	}
  1078  
  1079  	return client, waitClient, nil
  1080  }
  1081  
  1082  // newDockerClient creates a new *docker.Client with a configurable timeout
  1083  func (d *DockerDriver) newDockerClient(timeout time.Duration) (*docker.Client, error) {
  1084  	var err error
  1085  	var merr multierror.Error
  1086  	var newClient *docker.Client
  1087  
  1088  	// Default to using whatever is configured in docker.endpoint. If this is
  1089  	// not specified we'll fall back on NewClientFromEnv which reads config from
  1090  	// the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and
  1091  	// DOCKER_CERT_PATH. This allows us to lock down the config in production
  1092  	// but also accept the standard ENV configs for dev and test.
  1093  	dockerEndpoint := d.config.Read("docker.endpoint")
  1094  	if dockerEndpoint != "" {
  1095  		cert := d.config.Read("docker.tls.cert")
  1096  		key := d.config.Read("docker.tls.key")
  1097  		ca := d.config.Read("docker.tls.ca")
  1098  
  1099  		if cert+key+ca != "" {
  1100  			d.logger.Printf("[DEBUG] driver.docker: using TLS client connection to %s", dockerEndpoint)
  1101  			newClient, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
  1102  			if err != nil {
  1103  				merr.Errors = append(merr.Errors, err)
  1104  			}
  1105  		} else {
  1106  			d.logger.Printf("[DEBUG] driver.docker: using standard client connection to %s", dockerEndpoint)
  1107  			newClient, err = docker.NewClient(dockerEndpoint)
  1108  			if err != nil {
  1109  				merr.Errors = append(merr.Errors, err)
  1110  			}
  1111  		}
  1112  	} else {
  1113  		d.logger.Println("[DEBUG] driver.docker: using client connection initialized from environment")
  1114  		newClient, err = docker.NewClientFromEnv()
  1115  		if err != nil {
  1116  			merr.Errors = append(merr.Errors, err)
  1117  		}
  1118  	}
  1119  
  1120  	if timeout != 0 && newClient != nil {
  1121  		newClient.SetTimeout(timeout)
  1122  	}
  1123  	return newClient, merr.ErrorOrNil()
  1124  }
  1125  
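        // containerBinds returns the host bind mounts for the container: the shared
        // alloc, task local, and secrets directories, plus any user-specified
        // volumes allowed by the client configuration.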
  1126  func (d *DockerDriver) containerBinds(driverConfig *DockerDriverConfig, ctx *ExecContext,
  1127  	task *structs.Task) ([]string, error) {
  1128  
  1129  	allocDirBind := fmt.Sprintf("%s:%s", ctx.TaskDir.SharedAllocDir, ctx.TaskEnv.EnvMap[env.AllocDir])
  1130  	taskLocalBind := fmt.Sprintf("%s:%s", ctx.TaskDir.LocalDir, ctx.TaskEnv.EnvMap[env.TaskLocalDir])
  1131  	secretDirBind := fmt.Sprintf("%s:%s", ctx.TaskDir.SecretsDir, ctx.TaskEnv.EnvMap[env.SecretsDir])
  1132  	binds := []string{allocDirBind, taskLocalBind, secretDirBind}
  1133  
  1134  	volumesEnabled := d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault)
  1135  
  1136  	if !volumesEnabled && driverConfig.VolumeDriver != "" {
  1137  		return nil, fmt.Errorf("%s is false; cannot use volume driver %q", dockerVolumesConfigOption, driverConfig.VolumeDriver)
  1138  	}
  1139  
  1140  	for _, userbind := range driverConfig.Volumes {
  1141  		parts := strings.Split(userbind, ":")
  1142  		if len(parts) < 2 {
  1143  			return nil, fmt.Errorf("invalid docker volume: %q", userbind)
  1144  		}
  1145  
  1146  		// Resolve dotted path segments
  1147  		parts[0] = filepath.Clean(parts[0])
  1148  
  1149  		// Absolute paths aren't always supported
  1150  		if filepath.IsAbs(parts[0]) {
  1151  			if !volumesEnabled {
  1152  				// Disallow mounting arbitrary absolute paths
  1153  				return nil, fmt.Errorf("%s is false; cannot mount host paths: %+q", dockerVolumesConfigOption, userbind)
  1154  			}
  1155  			binds = append(binds, userbind)
  1156  			continue
  1157  		}
  1158  
  1159  		// Relative paths are always allowed as they mount within a container
  1160  		// When a VolumeDriver is set, we assume we receive a binding in the format volume-name:container-dest
  1161  		// Otherwise, we assume we receive a relative path binding in the format relative/to/task:/also/in/container
  1162  		if driverConfig.VolumeDriver == "" {
  1163  			// Expand path relative to alloc dir
  1164  			parts[0] = filepath.Join(ctx.TaskDir.Dir, parts[0])
  1165  		}
  1166  
  1167  		binds = append(binds, strings.Join(parts, ":"))
  1168  	}
  1169  
  1170  	if selinuxLabel := d.config.Read(dockerSELinuxLabelConfigOption); selinuxLabel != "" {
  1171  		// Apply SELinux Label to each volume
  1172  		for i := range binds {
  1173  			binds[i] = fmt.Sprintf("%s:%s", binds[i], selinuxLabel)
  1174  		}
  1175  	}
  1176  
  1177  	return binds, nil
  1178  }
  1179  
  1180  // createContainerConfig initializes a struct needed to call docker.client.CreateContainer()
  1181  func (d *DockerDriver) createContainerConfig(ctx *ExecContext, task *structs.Task,
  1182  	driverConfig *DockerDriverConfig, syslogAddr string) (docker.CreateContainerOptions, error) {
  1183  	var c docker.CreateContainerOptions
  1184  	if task.Resources == nil {
  1185  		// Guard against missing resources. We should never have been able to
  1186  		// schedule a job without specifying this.
  1187  		d.logger.Println("[ERR] driver.docker: task.Resources is empty")
  1188  		return c, fmt.Errorf("task.Resources is empty")
  1189  	}
  1190  
  1191  	binds, err := d.containerBinds(driverConfig, ctx, task)
  1192  	if err != nil {
  1193  		return c, err
  1194  	}
  1195  
  1196  	// create the config block that will later be consumed by go-dockerclient
  1197  	config := &docker.Config{
  1198  		Image:       d.imageID,
  1199  		Entrypoint:  driverConfig.Entrypoint,
  1200  		Hostname:    driverConfig.Hostname,
  1201  		User:        task.User,
  1202  		Tty:         driverConfig.TTY,
  1203  		OpenStdin:   driverConfig.Interactive,
  1204  		StopTimeout: int(task.KillTimeout.Seconds()),
  1205  		StopSignal:  task.KillSignal,
  1206  	}
  1207  
  1208  	if driverConfig.WorkDir != "" {
  1209  		config.WorkingDir = driverConfig.WorkDir
  1210  	}
  1211  
  1212  	memLimit := int64(task.Resources.MemoryMB) * 1024 * 1024
  1213  
  1214  	if len(driverConfig.Logging) == 0 {
  1215  		if runtime.GOOS == "darwin" {
  1216  			d.logger.Printf("[DEBUG] driver.docker: deferring logging to docker on Docker for Mac")
  1217  		} else {
  1218  			d.logger.Printf("[DEBUG] driver.docker: Setting default logging options to syslog and %s", syslogAddr)
  1219  			driverConfig.Logging = []DockerLoggingOpts{
  1220  				{Type: "syslog", Config: map[string]string{"syslog-address": syslogAddr}},
  1221  			}
  1222  		}
  1223  	}
  1224  
  1225  	hostConfig := &docker.HostConfig{
  1226  		// Convert MB to bytes. This is an absolute value.
  1227  		Memory: memLimit,
  1228  		// Convert Mhz to shares. This is a relative value.
  1229  		CPUShares: int64(task.Resources.CPU),
  1230  
  1231  		// Binds are used to mount a host volume into the container. We mount a
  1232  		// local directory for storage and a shared alloc directory that can be
  1233  		// used to share data between different tasks in the same task group.
  1234  		Binds: binds,
  1235  
  1236  		VolumeDriver: driverConfig.VolumeDriver,
  1237  
  1238  		PidsLimit: driverConfig.PidsLimit,
  1239  	}
  1240  
  1241  	// Calculate CPU Quota
  1242  	// cfs_quota_us is the time per core, so we must
  1243  	// multiply the time by the number of cores available
  1244  	// See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu
  1245  	if driverConfig.CPUHardLimit {
  1246  		numCores := runtime.NumCPU()
  1247  		percentTicks := float64(task.Resources.CPU) / float64(d.node.Resources.CPU)
  1248  		hostConfig.CPUQuota = int64(percentTicks*defaultCFSPeriodUS) * int64(numCores)
  1249  	}
  1250  
  1251  	// Windows does not support MemorySwap/MemorySwappiness #2193
  1252  	if runtime.GOOS == "windows" {
  1253  		hostConfig.MemorySwap = 0
  1254  		hostConfig.MemorySwappiness = -1
  1255  	} else {
  1256  		hostConfig.MemorySwap = memLimit // MemorySwap is memory + swap.
  1257  	}
  1258  
  1259  	if len(driverConfig.Logging) != 0 {
  1260  		d.logger.Printf("[DEBUG] driver.docker: Using config for logging: %+v", driverConfig.Logging[0])
  1261  		hostConfig.LogConfig = docker.LogConfig{
  1262  			Type:   driverConfig.Logging[0].Type,
  1263  			Config: driverConfig.Logging[0].Config,
  1264  		}
  1265  	}
  1266  
  1267  	d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Name)
  1268  	d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Name)
  1269  	if driverConfig.CPUHardLimit {
  1270  		d.logger.Printf("[DEBUG] driver.docker: using %dus cpu quota and %dus cpu period for %s", hostConfig.CPUQuota, defaultCFSPeriodUS, task.Name)
  1271  	}
  1272  	d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s", hostConfig.Binds, task.Name)
  1273  
  1274  	//  set privileged mode
  1275  	hostPrivileged := d.config.ReadBoolDefault(dockerPrivilegedConfigOption, false)
  1276  	if driverConfig.Privileged && !hostPrivileged {
  1277  		return c, fmt.Errorf(`Docker privileged mode is disabled on this Nomad agent`)
  1278  	}
  1279  	hostConfig.Privileged = driverConfig.Privileged
  1280  
  1281  	// set capabilities
  1282  	hostCapsWhitelistConfig := d.config.ReadDefault(
  1283  		dockerCapsWhitelistConfigOption, dockerCapsWhitelistConfigDefault)
  1284  	hostCapsWhitelist := make(map[string]struct{})
  1285  	for _, cap := range strings.Split(hostCapsWhitelistConfig, ",") {
  1286  		cap = strings.ToLower(strings.TrimSpace(cap))
  1287  		hostCapsWhitelist[cap] = struct{}{}
  1288  	}
  1289  
  1290  	if _, ok := hostCapsWhitelist["all"]; !ok {
  1291  		effectiveCaps, err := tweakCapabilities(
  1292  			strings.Split(dockerBasicCaps, ","),
  1293  			driverConfig.CapAdd,
  1294  			driverConfig.CapDrop,
  1295  		)
  1296  		if err != nil {
  1297  			return c, err
  1298  		}
  1299  		var missingCaps []string
  1300  		for _, cap := range effectiveCaps {
  1301  			cap = strings.ToLower(cap)
  1302  			if _, ok := hostCapsWhitelist[cap]; !ok {
  1303  				missingCaps = append(missingCaps, cap)
  1304  			}
  1305  		}
  1306  		if len(missingCaps) > 0 {
  1307  			return c, fmt.Errorf("Docker driver doesn't have the following caps whitelisted on this Nomad agent: %s", missingCaps)
  1308  		}
  1309  	}
  1310  
  1311  	hostConfig.CapAdd = driverConfig.CapAdd
  1312  	hostConfig.CapDrop = driverConfig.CapDrop
  1313  
  1314  	// set SHM size
  1315  	if driverConfig.ShmSize != 0 {
  1316  		hostConfig.ShmSize = driverConfig.ShmSize
  1317  	}
  1318  
  1319  	// set DNS servers
  1320  	for _, ip := range driverConfig.DNSServers {
  1321  		if net.ParseIP(ip) != nil {
  1322  			hostConfig.DNS = append(hostConfig.DNS, ip)
  1323  		} else {
  1324  			d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s", ip)
  1325  		}
  1326  	}
  1327  
  1328  	if len(driverConfig.Devices) > 0 {
  1329  		var devices []docker.Device
  1330  		for _, device := range driverConfig.Devices {
  1331  			dev := docker.Device{
  1332  				PathOnHost:        device.HostPath,
  1333  				PathInContainer:   device.ContainerPath,
  1334  				CgroupPermissions: device.CgroupPermissions}
  1335  			devices = append(devices, dev)
  1336  		}
  1337  		hostConfig.Devices = devices
  1338  	}
  1339  
  1340  	// Setup mounts
  1341  	for _, m := range driverConfig.Mounts {
  1342  		hm := docker.HostMount{
  1343  			Target:   m.Target,
  1344  			Source:   m.Source,
  1345  			Type:     "volume", // Only type supported
  1346  			ReadOnly: m.ReadOnly,
  1347  		}
  1348  		if len(m.VolumeOptions) == 1 {
  1349  			vo := m.VolumeOptions[0]
  1350  			hm.VolumeOptions = &docker.VolumeOptions{
  1351  				NoCopy: vo.NoCopy,
  1352  			}
  1353  
  1354  			if len(vo.DriverConfig) == 1 {
  1355  				dc := vo.DriverConfig[0]
  1356  				hm.VolumeOptions.DriverConfig = docker.VolumeDriverConfig{
  1357  					Name: dc.Name,
  1358  				}
  1359  				if len(dc.Options) == 1 {
  1360  					hm.VolumeOptions.DriverConfig.Options = dc.Options[0]
  1361  				}
  1362  			}
  1363  			if len(vo.Labels) == 1 {
  1364  				hm.VolumeOptions.Labels = vo.Labels[0]
  1365  			}
  1366  		}
  1367  		hostConfig.Mounts = append(hostConfig.Mounts, hm)
  1368  	}
  1369  
  1370  	// set DNS search domains and extra hosts
  1371  	hostConfig.DNSSearch = driverConfig.DNSSearchDomains
  1372  	hostConfig.DNSOptions = driverConfig.DNSOptions
  1373  	hostConfig.ExtraHosts = driverConfig.ExtraHosts
  1374  
  1375  	hostConfig.IpcMode = driverConfig.IpcMode
  1376  	hostConfig.PidMode = driverConfig.PidMode
  1377  	hostConfig.UTSMode = driverConfig.UTSMode
  1378  	hostConfig.UsernsMode = driverConfig.UsernsMode
  1379  	hostConfig.SecurityOpt = driverConfig.SecurityOpt
  1380  	hostConfig.Sysctls = driverConfig.Sysctl
  1381  	hostConfig.Ulimits = driverConfig.Ulimit
  1382  	hostConfig.ReadonlyRootfs = driverConfig.ReadonlyRootfs
  1383  
  1384  	hostConfig.NetworkMode = driverConfig.NetworkMode
  1385  	if hostConfig.NetworkMode == "" {
  1386  		// docker default
  1387  		d.logger.Printf("[DEBUG] driver.docker: networking mode not specified; defaulting to %s", defaultNetworkMode)
  1388  		hostConfig.NetworkMode = defaultNetworkMode
  1389  	}
  1390  
  1391  	// Setup port mapping and exposed ports
  1392  	if len(task.Resources.Networks) == 0 {
  1393  		d.logger.Println("[DEBUG] driver.docker: No network interfaces are available")
  1394  		if len(driverConfig.PortMap) > 0 {
  1395  			return c, fmt.Errorf("Trying to map ports but no network interface is available")
  1396  		}
  1397  	} else {
  1398  		// TODO add support for more than one network
  1399  		network := task.Resources.Networks[0]
  1400  		publishedPorts := map[docker.Port][]docker.PortBinding{}
  1401  		exposedPorts := map[docker.Port]struct{}{}
  1402  
  1403  		for _, port := range network.ReservedPorts {
  1404  			// By default we will map the allocated port 1:1 to the container
  1405  			containerPortInt := port.Value
  1406  
  1407  			// If the user has mapped a port using port_map we'll change it here
  1408  			if mapped, ok := driverConfig.PortMap[port.Label]; ok {
  1409  				containerPortInt = mapped
  1410  			}
  1411  
  1412  			hostPortStr := strconv.Itoa(port.Value)
  1413  			containerPort := docker.Port(strconv.Itoa(containerPortInt))
  1414  
  1415  			publishedPorts[containerPort+"/tcp"] = getPortBinding(network.IP, hostPortStr)
  1416  			publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr)
  1417  			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static)", network.IP, port.Value, containerPortInt)
  1418  
  1419  			exposedPorts[containerPort+"/tcp"] = struct{}{}
  1420  			exposedPorts[containerPort+"/udp"] = struct{}{}
  1421  			d.logger.Printf("[DEBUG] driver.docker: exposed port %d", containerPortInt)
  1422  		}
  1423  
  1424  		for _, port := range network.DynamicPorts {
  1425  			// By default we will map the allocated port 1:1 to the container
  1426  			containerPortInt := port.Value
  1427  
  1428  			// If the user has mapped a port using port_map we'll change it here
  1429  			if mapped, ok := driverConfig.PortMap[port.Label]; ok {
  1430  				containerPortInt = mapped
  1431  			}
  1432  
  1433  			hostPortStr := strconv.Itoa(port.Value)
  1434  			containerPort := docker.Port(strconv.Itoa(containerPortInt))
  1435  
  1436  			publishedPorts[containerPort+"/tcp"] = getPortBinding(network.IP, hostPortStr)
  1437  			publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr)
  1438  			d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)", network.IP, port.Value, containerPortInt)
  1439  
  1440  			exposedPorts[containerPort+"/tcp"] = struct{}{}
  1441  			exposedPorts[containerPort+"/udp"] = struct{}{}
  1442  			d.logger.Printf("[DEBUG] driver.docker: exposed port %s", containerPort)
  1443  		}
  1444  
  1445  		hostConfig.PortBindings = publishedPorts
  1446  		config.ExposedPorts = exposedPorts
  1447  	}
  1448  
  1449  	parsedArgs := ctx.TaskEnv.ParseAndReplace(driverConfig.Args)
  1450  
  1451  	// If the user specified a custom command to run, we'll inject it here.
  1452  	if driverConfig.Command != "" {
  1453  		// Validate command
  1454  		if err := validateCommand(driverConfig.Command, "args"); err != nil {
  1455  			return c, err
  1456  		}
  1457  
  1458  		cmd := []string{driverConfig.Command}
  1459  		if len(driverConfig.Args) != 0 {
  1460  			cmd = append(cmd, parsedArgs...)
  1461  		}
  1462  		d.logger.Printf("[DEBUG] driver.docker: setting container startup command to: %s", strings.Join(cmd, " "))
  1463  		config.Cmd = cmd
  1464  	} else if len(driverConfig.Args) != 0 {
  1465  		config.Cmd = parsedArgs
  1466  	}
  1467  
  1468  	if len(driverConfig.Labels) > 0 {
  1469  		config.Labels = driverConfig.Labels
  1470  		d.logger.Printf("[DEBUG] driver.docker: applied labels on the container: %+v", config.Labels)
  1471  	}
  1472  
  1473  	config.Env = ctx.TaskEnv.List()
  1474  
  1475  	containerName := fmt.Sprintf("%s-%s", task.Name, d.DriverContext.allocID)
  1476  	d.logger.Printf("[DEBUG] driver.docker: setting container name to: %s", containerName)
  1477  
  1478  	var networkingConfig *docker.NetworkingConfig
  1479  	if len(driverConfig.NetworkAliases) > 0 || driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" {
  1480  		networkingConfig = &docker.NetworkingConfig{
  1481  			EndpointsConfig: map[string]*docker.EndpointConfig{
  1482  				hostConfig.NetworkMode: {},
  1483  			},
  1484  		}
  1485  	}
  1486  
  1487  	if len(driverConfig.NetworkAliases) > 0 {
  1488  		networkingConfig.EndpointsConfig[hostConfig.NetworkMode].Aliases = driverConfig.NetworkAliases
  1489  		d.logger.Printf("[DEBUG] driver.docker: using network_mode %q with network aliases: %v",
  1490  			hostConfig.NetworkMode, strings.Join(driverConfig.NetworkAliases, ", "))
  1491  	}
  1492  
  1493  	if driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" {
  1494  		networkingConfig.EndpointsConfig[hostConfig.NetworkMode].IPAMConfig = &docker.EndpointIPAMConfig{
  1495  			IPv4Address: driverConfig.IPv4Address,
  1496  			IPv6Address: driverConfig.IPv6Address,
  1497  		}
  1498  		d.logger.Printf("[DEBUG] driver.docker: using network_mode %q with ipv4: %q and ipv6: %q",
  1499  			hostConfig.NetworkMode, driverConfig.IPv4Address, driverConfig.IPv6Address)
  1500  	}
  1501  
  1502  	if driverConfig.MacAddress != "" {
  1503  		config.MacAddress = driverConfig.MacAddress
  1504  		d.logger.Printf("[DEBUG] driver.docker: using pinned mac address: %q", config.MacAddress)
  1505  	}
  1506  
  1507  	return docker.CreateContainerOptions{
  1508  		Name:             containerName,
  1509  		Config:           config,
  1510  		HostConfig:       hostConfig,
  1511  		NetworkingConfig: networkingConfig,
  1512  	}, nil
  1513  }
  1514  
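        // Periodic indicates that the Docker driver should be fingerprinted
        // periodically, at a 15 second interval.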
  1515  func (d *DockerDriver) Periodic() (bool, time.Duration) {
  1516  	return true, 15 * time.Second
  1517  }
  1518  
  1519  // createImage creates a docker image either by pulling it from a registry or by
  1520  // loading it from the file system
  1521  func (d *DockerDriver) createImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir *allocdir.TaskDir) (string, error) {
  1522  	image := driverConfig.ImageName
  1523  	repo, tag := parseDockerImage(image)
  1524  
  1525  	coordinator, callerID := d.getDockerCoordinator(client)
  1526  
  1527  	// We're going to check whether the image is already downloaded. If the tag
  1528  	// is "latest", or ForcePull is set, we have to check for a new version every time so we don't
  1529  	// bother to check and cache the id here. We'll download first, then cache.
  1530  	if driverConfig.ForcePull {
  1531  		d.logger.Printf("[DEBUG] driver.docker: force pull image '%s' instead of inspecting local", dockerImageRef(repo, tag))
  1532  	} else if tag != "latest" {
  1533  		if dockerImage, _ := client.InspectImage(image); dockerImage != nil {
  1534  			// Image exists so just increment its reference count
  1535  			coordinator.IncrementImageReference(dockerImage.ID, image, callerID)
  1536  			return dockerImage.ID, nil
  1537  		}
  1538  	}
  1539  
  1540  	// Load the image if specified
  1541  	if driverConfig.LoadImage != "" {
  1542  		return d.loadImage(driverConfig, client, taskDir)
  1543  	}
  1544  
  1545  	// Download the image
  1546  	return d.pullImage(driverConfig, client, repo, tag)
  1547  }
  1548  
  1549  // pullImage creates an image by pulling it from a docker registry
  1550  func (d *DockerDriver) pullImage(driverConfig *DockerDriverConfig, client *docker.Client, repo, tag string) (id string, err error) {
  1551  	authOptions, err := d.resolveRegistryAuthentication(driverConfig, repo)
  1552  	if err != nil {
  1553  		if d.driverConfig.AuthSoftFail {
  1554  			d.logger.Printf("[WARN] Failed to find docker auth for repo %q: %v", repo, err)
  1555  		} else {
  1556  			return "", fmt.Errorf("Failed to find docker auth for repo %q: %v", repo, err)
  1557  		}
  1558  	}
  1559  
  1560  	if authIsEmpty(authOptions) {
  1561  		d.logger.Printf("[DEBUG] driver.docker: did not find docker auth for repo %q", repo)
  1562  	}
  1563  
  1564  	d.emitEvent("Downloading image %s", dockerImageRef(repo, tag))
  1565  	coordinator, callerID := d.getDockerCoordinator(client)
  1566  
  1567  	return coordinator.PullImage(driverConfig.ImageName, authOptions, callerID, d.emitEvent)
  1568  }
  1569  
  1570  // authBackend encapsulates a function that resolves registry credentials.
  1571  type authBackend func(string) (*docker.AuthConfiguration, error)
  1572  
  1573  // resolveRegistryAuthentication attempts to retrieve auth credentials for the
  1574  // repo, trying each available authentication backend in turn.
  1575  func (d *DockerDriver) resolveRegistryAuthentication(driverConfig *DockerDriverConfig, repo string) (*docker.AuthConfiguration, error) {
  1576  	return firstValidAuth(repo, []authBackend{
  1577  		authFromTaskConfig(driverConfig),
  1578  		authFromDockerConfig(d.config.Read("docker.auth.config")),
  1579  		authFromHelper(d.config.Read("docker.auth.helper")),
  1580  	})
  1581  }
  1582  
  1583  // loadImage creates an image by loading it from the file system
  1584  func (d *DockerDriver) loadImage(driverConfig *DockerDriverConfig, client *docker.Client,
  1585  	taskDir *allocdir.TaskDir) (id string, err error) {
  1586  
  1587  	archive := filepath.Join(taskDir.LocalDir, driverConfig.LoadImage)
  1588  	d.logger.Printf("[DEBUG] driver.docker: loading image from: %v", archive)
  1589  
  1590  	f, err := os.Open(archive)
  1591  	if err != nil {
  1592  		return "", fmt.Errorf("unable to open image archive: %v", err)
  1593  	}
  1594  	defer f.Close()
  1595  
  1596  	if err := client.LoadImage(docker.LoadImageOptions{InputStream: f}); err != nil {
  1597  		return "", err
  1598  	}
  1599  
  1600  	dockerImage, err := client.InspectImage(driverConfig.ImageName)
  1601  	if err != nil {
  1602  		return "", recoverableErrTimeouts(err)
  1603  	}
  1604  
  1605  	coordinator, callerID := d.getDockerCoordinator(client)
  1606  	coordinator.IncrementImageReference(dockerImage.ID, driverConfig.ImageName, callerID)
  1607  	return dockerImage.ID, nil
  1608  }
  1609  
  1610  // createContainer creates the container given the passed configuration. It
  1611  // attempts to handle any transient Docker errors.
  1612  func (d *DockerDriver) createContainer(client createContainerClient, config docker.CreateContainerOptions) (*docker.Container, error) {
  1613  	// Create a container
  1614  	attempted := 0
  1615  CREATE:
  1616  	container, createErr := client.CreateContainer(config)
  1617  	if createErr == nil {
  1618  		return container, nil
  1619  	}
  1620  
  1621  	d.logger.Printf("[DEBUG] driver.docker: failed to create container %q from image %q (ID: %q) (attempt %d): %v",
  1622  		config.Name, d.driverConfig.ImageName, d.imageID, attempted+1, createErr)
  1623  
  1624  	// Volume management tools like Portworx may not have detached a volume
  1625  	// from a previous node before Nomad started a replacement task.
  1626  	// Treat these errors as recoverable so we retry.
  1627  	if strings.Contains(strings.ToLower(createErr.Error()), "volume is attached on another node") {
  1628  		return nil, structs.NewRecoverableError(createErr, true)
  1629  	}
  1630  
  1631  	// If the container already exists determine whether it's already
  1632  	// running or if it's dead and needs to be recreated.
  1633  	if strings.Contains(strings.ToLower(createErr.Error()), "container already exists") {
  1634  		containers, err := client.ListContainers(docker.ListContainersOptions{
  1635  			All: true,
  1636  		})
  1637  		if err != nil {
  1638  			d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name: %s", config.Name)
  1639  			return nil, recoverableErrTimeouts(fmt.Errorf("Failed to query list of containers: %s", err))
  1640  		}
  1641  
  1642  		// Delete matching containers
  1643  		// Add a / in front of the container name, since Docker returns the
  1644  		// container names with a / prepended to the Nomad-generated container names
  1645  		containerName := "/" + config.Name
  1646  		d.logger.Printf("[DEBUG] driver.docker: searching for container name %q to purge", containerName)
  1647  		for _, shimContainer := range containers {
  1648  			d.logger.Printf("[DEBUG] driver.docker: listed container %+v", shimContainer.Names)
  1649  			found := false
  1650  			for _, name := range shimContainer.Names {
  1651  				if name == containerName {
  1652  					d.logger.Printf("[DEBUG] driver.docker: Found container %v: %v", containerName, shimContainer.ID)
  1653  					found = true
  1654  					break
  1655  				}
  1656  			}
  1657  
  1658  			if !found {
  1659  				continue
  1660  			}
  1661  
  1662  			// Inspect the container; if it is still running, return it instead
  1663  			// of recreating it
  1664  			container, err := client.InspectContainer(shimContainer.ID)
  1665  			if err != nil {
  1666  				err = fmt.Errorf("Failed to inspect container %s: %s", shimContainer.ID, err)
  1667  
  1668  				// This error is always recoverable as it could
  1669  				// be caused by races between listing
  1670  				// containers and this container being removed.
  1671  				// See #2802
  1672  				return nil, structs.NewRecoverableError(err, true)
  1673  			}
  1674  			if container != nil && container.State.Running {
  1675  				return container, nil
  1676  			}
  1677  
  1678  			err = client.RemoveContainer(docker.RemoveContainerOptions{
  1679  				ID:    container.ID,
  1680  				Force: true,
  1681  			})
  1682  			if err != nil {
  1683  				d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID)
  1684  				return nil, recoverableErrTimeouts(fmt.Errorf("Failed to purge container %s: %s", container.ID, err))
  1685  		} else {
  1686  				d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID)
  1687  			}
  1688  		}
  1689  
  1690  		if attempted < 5 {
  1691  			attempted++
  1692  			time.Sleep(1 * time.Second)
  1693  			goto CREATE
  1694  		}
  1695  	} else if strings.Contains(strings.ToLower(createErr.Error()), "no such image") {
  1696  		// There is still a very small chance this can happen even with the
  1697  		// image coordinator, so retry.
  1698  		return nil, structs.NewRecoverableError(createErr, true)
  1699  	}
  1700  
  1701  	return nil, recoverableErrTimeouts(createErr)
  1702  }
  1703  
  1704  // startContainer starts the passed container. It attempts to handle any
  1705  // transient Docker errors.
  1706  func (d *DockerDriver) startContainer(c *docker.Container) error {
  1707  	// Start a container
  1708  	attempted := 0
  1709  START:
  1710  	startErr := client.StartContainer(c.ID, c.HostConfig)
  1711  	if startErr == nil {
  1712  		return nil
  1713  	}
  1714  
  1715  	d.logger.Printf("[DEBUG] driver.docker: failed to start container %q (attempt %d): %v", c.ID, attempted+1, startErr)
  1716  
  1717  	// If it is a 500 error it is likely we can retry and be successful
  1718  	if strings.Contains(startErr.Error(), "API error (500)") {
  1719  		if attempted < 5 {
  1720  			attempted++
  1721  			time.Sleep(1 * time.Second)
  1722  			goto START
  1723  		}
  1724  		return structs.NewRecoverableError(startErr, true)
  1725  	}
  1726  
  1727  	return recoverableErrTimeouts(startErr)
  1728  }
  1729  
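        // Open re-attaches to a task's running container and executor plugin using
        // the state encoded in handleID, and returns a restored driver handle.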
  1730  func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
  1731  	// Split the handle
  1732  	pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:"))
  1733  	pid := &dockerPID{}
  1734  	if err := json.Unmarshal(pidBytes, pid); err != nil {
  1735  		return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
  1736  	}
  1737  	d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s", pid.ContainerID)
  1738  	d.logger.Printf("[DEBUG] driver.docker: re-attached to handle: %s", handleID)
  1739  	pluginConfig := &plugin.ClientConfig{
  1740  		Reattach: pid.PluginConfig.PluginConfig(),
  1741  	}
  1742  
  1743  	client, waitClient, err := d.dockerClients()
  1744  	if err != nil {
  1745  		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
  1746  	}
  1747  
  1748  	// Look for a running container with this ID
  1749  	containers, err := client.ListContainers(docker.ListContainersOptions{
  1750  		Filters: map[string][]string{
  1751  			"id": {pid.ContainerID},
  1752  		},
  1753  	})
  1754  	if err != nil {
  1755  		return nil, fmt.Errorf("Failed to query for container %s: %v", pid.ContainerID, err)
  1756  	}
  1757  
  1758  	found := false
  1759  	for _, container := range containers {
  1760  		if container.ID == pid.ContainerID {
  1761  			found = true
  1762  		}
  1763  	}
  1764  	if !found {
  1765  		return nil, fmt.Errorf("Failed to find container %s", pid.ContainerID)
  1766  	}
  1767  	exec, pluginClient, err := createExecutorWithConfig(pluginConfig, d.config.LogOutput)
  1768  	if err != nil {
  1769  		d.logger.Printf("[INFO] driver.docker: couldn't re-attach to the plugin process: %v", err)
  1770  		d.logger.Printf("[DEBUG] driver.docker: stopping container %q", pid.ContainerID)
  1771  		if e := client.StopContainer(pid.ContainerID, uint(pid.KillTimeout.Seconds())); e != nil {
  1772  			d.logger.Printf("[DEBUG] driver.docker: couldn't stop container: %v", e)
  1773  		}
  1774  		return nil, err
  1775  	}
  1776  
  1777  	ver, _ := exec.Version()
  1778  	d.logger.Printf("[DEBUG] driver.docker: version of executor: %v", ver.Version)
  1779  
  1780  	// Increment the reference count since we successfully attached to this
  1781  	// container
  1782  	coordinator, callerID := d.getDockerCoordinator(client)
  1783  	coordinator.IncrementImageReference(pid.ImageID, pid.Image, callerID)
  1784  
  1785  	// Return a driver handle
  1786  	h := &DockerHandle{
  1787  		client:         client,
  1788  		waitClient:     waitClient,
  1789  		executor:       exec,
  1790  		pluginClient:   pluginClient,
  1791  		logger:         d.logger,
  1792  		jobName:        d.DriverContext.jobName,
  1793  		taskGroupName:  d.DriverContext.taskGroupName,
  1794  		taskName:       d.DriverContext.taskName,
  1795  		Image:          pid.Image,
  1796  		ImageID:        pid.ImageID,
  1797  		containerID:    pid.ContainerID,
  1798  		version:        pid.Version,
  1799  		killTimeout:    pid.KillTimeout,
  1800  		maxKillTimeout: pid.MaxKillTimeout,
  1801  		doneCh:         make(chan bool),
  1802  		waitCh:         make(chan *dstructs.WaitResult, 1),
  1803  	}
  1804  	go h.collectStats()
  1805  	go h.run()
  1806  	return h, nil
  1807  }
  1808  
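        // ID encodes the handle's reattach state (container, image, kill timeouts
        // and plugin config) as a "DOCKER:"-prefixed JSON string.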
  1809  func (h *DockerHandle) ID() string {
  1810  	// Return a handle to the PID
  1811  	pid := dockerPID{
  1812  		Version:        h.version,
  1813  		ContainerID:    h.containerID,
  1814  		Image:          h.Image,
  1815  		ImageID:        h.ImageID,
  1816  		KillTimeout:    h.killTimeout,
  1817  		MaxKillTimeout: h.maxKillTimeout,
  1818  		PluginConfig:   NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
  1819  	}
  1820  	data, err := json.Marshal(pid)
  1821  	if err != nil {
  1822  		h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s", err)
  1823  	}
  1824  	return fmt.Sprintf("DOCKER:%s", string(data))
  1825  }
  1826  
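        // ContainerID returns the ID of the Docker container managed by this handle.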
  1827  func (h *DockerHandle) ContainerID() string {
  1828  	return h.containerID
  1829  }
  1830  
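        // WaitCh returns the channel on which the container's wait result is
        // delivered when the task exits.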
  1831  func (h *DockerHandle) WaitCh() chan *dstructs.WaitResult {
  1832  	return h.waitCh
  1833  }
  1834  
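        // Update stores the new kill timeout and forwards the updated task to the
        // executor so it can adjust its log configuration.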
  1835  func (h *DockerHandle) Update(task *structs.Task) error {
  1836  	// Store the updated kill timeout.
  1837  	h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
  1838  	if err := h.executor.UpdateTask(task); err != nil {
  1839  		h.logger.Printf("[DEBUG] driver.docker: failed to update log config: %v", err)
  1840  	}
  1841  
  1842  	// Updating anything else in place is not possible
  1843  	return nil
  1844  }
  1845  
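        // Exec runs cmd with args inside the container using the Docker exec API
        // and returns the combined stdout/stderr output and the exit code.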
  1846  func (h *DockerHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
  1847  	fullCmd := make([]string, len(args)+1)
  1848  	fullCmd[0] = cmd
  1849  	copy(fullCmd[1:], args)
  1850  	createExecOpts := docker.CreateExecOptions{
  1851  		AttachStdin:  false,
  1852  		AttachStdout: true,
  1853  		AttachStderr: true,
  1854  		Tty:          false,
  1855  		Cmd:          fullCmd,
  1856  		Container:    h.containerID,
  1857  		Context:      ctx,
  1858  	}
  1859  	exec, err := h.client.CreateExec(createExecOpts)
  1860  	if err != nil {
  1861  		return nil, 0, err
  1862  	}
  1863  
  1864  	output, _ := circbuf.NewBuffer(int64(dstructs.CheckBufSize))
  1865  	startOpts := docker.StartExecOptions{
  1866  		Detach:       false,
  1867  		Tty:          false,
  1868  		OutputStream: output,
  1869  		ErrorStream:  output,
  1870  		Context:      ctx,
  1871  	}
  1872  	if err := h.client.StartExec(exec.ID, startOpts); err != nil {
  1873  		return nil, 0, err
  1874  	}
  1875  	res, err := h.client.InspectExec(exec.ID)
  1876  	if err != nil {
  1877  		return output.Bytes(), 0, err
  1878  	}
  1879  	return output.Bytes(), res.ExitCode, nil
  1880  }
  1881  
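        // Signal sends s to the container via the Docker kill API; only
        // syscall.Signal values are supported.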
  1882  func (h *DockerHandle) Signal(s os.Signal) error {
  1883  	// Convert types
  1884  	sysSig, ok := s.(syscall.Signal)
  1885  	if !ok {
  1886  		return fmt.Errorf("Failed to determine signal number")
  1887  	}
  1888  
  1889  	// TODO: When we expose signals we will need a mapping layer that converts
  1890  	// macOS signals to the correct signal number for docker, or we change the
  1891  	// interface to take a signal string and leave it up to the driver to map.
  1892  
  1893  	dockerSignal := docker.Signal(sysSig)
  1894  	opts := docker.KillContainerOptions{
  1895  		ID:     h.containerID,
  1896  		Signal: dockerSignal,
  1897  	}
  1898  	return h.client.KillContainer(opts)
  1899  
  1900  }
  1901  
  1902  // Kill is used to terminate the task. This uses `docker stop -t killTimeout`
  1903  func (h *DockerHandle) Kill() error {
  1904  	// Stop the container
  1905  	err := h.waitClient.StopContainer(h.containerID, uint(h.killTimeout.Seconds()))
  1906  	if err != nil {
  1907  		h.executor.Exit()
  1908  		h.pluginClient.Kill()
  1909  
  1910  		// Container has already been removed.
  1911  		if strings.Contains(err.Error(), NoSuchContainerError) {
  1912  			h.logger.Printf("[DEBUG] driver.docker: attempted to stop nonexistent container %s", h.containerID)
  1913  			return nil
  1914  		}
  1915  		h.logger.Printf("[ERR] driver.docker: failed to stop container %s: %v", h.containerID, err)
  1916  		return fmt.Errorf("Failed to stop container %s: %s", h.containerID, err)
  1917  	}
  1918  	h.logger.Printf("[INFO] driver.docker: stopped container %s", h.containerID)
  1919  	return nil
  1920  }
  1921  
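        // Stats returns the most recently collected resource usage sample, or an
        // error if stats collection has not produced one yet.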
  1922  func (h *DockerHandle) Stats() (*cstructs.TaskResourceUsage, error) {
  1923  	h.resourceUsageLock.RLock()
  1924  	defer h.resourceUsageLock.RUnlock()
  1925  	var err error
  1926  	if h.resourceUsage == nil {
  1927  		err = fmt.Errorf("stats collection hasn't started yet")
  1928  	}
  1929  	return h.resourceUsage, err
  1930  }
  1931  
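        // run waits for the container to exit, records OOM kills, shuts down the
        // executor plugin, optionally removes the container and publishes the
        // result on waitCh.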
  1932  func (h *DockerHandle) run() {
  1933  	// Wait for it...
  1934  	exitCode, werr := h.waitClient.WaitContainer(h.containerID)
  1935  	if werr != nil {
  1936  		h.logger.Printf("[ERR] driver.docker: failed to wait for %s: %v", h.containerID, werr)
  1937  	}
  1938  
  1939  	if exitCode != 0 {
  1940  		werr = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode)
  1941  	}
  1942  
  1943  	container, ierr := h.waitClient.InspectContainer(h.containerID)
  1944  	if ierr != nil {
  1945  		h.logger.Printf("[ERR] driver.docker: failed to inspect container %s: %v", h.containerID, ierr)
  1946  	} else if container.State.OOMKilled {
  1947  		werr = fmt.Errorf("OOM Killed")
  1948  		labels := []metrics.Label{
  1949  			{
  1950  				Name:  "job",
  1951  				Value: h.jobName,
  1952  			},
  1953  			{
  1954  				Name:  "task_group",
  1955  				Value: h.taskGroupName,
  1956  			},
  1957  			{
  1958  				Name:  "task",
  1959  				Value: h.taskName,
  1960  			},
  1961  		}
  1962  		metrics.IncrCounterWithLabels([]string{"driver", "docker", "oom"}, 1, labels)
  1963  	}
  1964  
  1965  	close(h.doneCh)
  1966  
  1967  	// Shutdown the syslog collector
  1968  	if err := h.executor.Exit(); err != nil {
  1969  		h.logger.Printf("[ERR] driver.docker: failed to kill the syslog collector: %v", err)
  1970  	}
  1971  	h.pluginClient.Kill()
  1972  
  1973  	// Stop the container just in case the docker daemon's wait returned
  1974  	// incorrectly
  1975  	if err := h.client.StopContainer(h.containerID, 0); err != nil {
  1976  		_, noSuchContainer := err.(*docker.NoSuchContainer)
  1977  		_, containerNotRunning := err.(*docker.ContainerNotRunning)
  1978  		if !containerNotRunning && !noSuchContainer {
  1979  			h.logger.Printf("[ERR] driver.docker: error stopping container: %v", err)
  1980  		}
  1981  	}
  1982  
  1983  	// Remove the container
  1984  	if h.removeContainerOnExit {
  1985  		if err := h.client.RemoveContainer(docker.RemoveContainerOptions{ID: h.containerID, RemoveVolumes: true, Force: true}); err != nil {
  1986  			h.logger.Printf("[ERR] driver.docker: error removing container: %v", err)
  1987  		}
  1988  	} else {
  1989  		h.logger.Printf("[DEBUG] driver.docker: not removing container %v because of config", h.containerID)
  1990  	}
  1991  
  1992  	// Send the results
  1993  	h.waitCh <- dstructs.NewWaitResult(exitCode, 0, werr)
  1994  	close(h.waitCh)
  1995  }
  1996  
  1997  // collectStats starts collecting resource usage stats of a docker container
  1998  func (h *DockerHandle) collectStats() {
  1999  	statsCh := make(chan *docker.Stats)
  2000  	statsOpts := docker.StatsOptions{ID: h.containerID, Done: h.doneCh, Stats: statsCh, Stream: true}
  2001  	go func() {
  2002  		// TODO: handle Stats error
  2003  		if err := h.waitClient.Stats(statsOpts); err != nil {
  2004  			h.logger.Printf("[DEBUG] driver.docker: error collecting stats from container %s: %v", h.containerID, err)
  2005  		}
  2006  	}()
  2007  	numCores := runtime.NumCPU()
  2008  	for {
  2009  		select {
  2010  		case s := <-statsCh:
  2011  			if s != nil {
  2012  				ms := &cstructs.MemoryStats{
  2013  					RSS:      s.MemoryStats.Stats.Rss,
  2014  					Cache:    s.MemoryStats.Stats.Cache,
  2015  					Swap:     s.MemoryStats.Stats.Swap,
  2016  					MaxUsage: s.MemoryStats.MaxUsage,
  2017  					Measured: DockerMeasuredMemStats,
  2018  				}
  2019  
  2020  				cs := &cstructs.CpuStats{
  2021  					ThrottledPeriods: s.CPUStats.ThrottlingData.ThrottledPeriods,
  2022  					ThrottledTime:    s.CPUStats.ThrottlingData.ThrottledTime,
  2023  					Measured:         DockerMeasuredCpuStats,
  2024  				}
  2025  
  2026  				// Calculate percentage
  2027  				cs.Percent = calculatePercent(
  2028  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage,
  2029  					s.CPUStats.SystemCPUUsage, s.PreCPUStats.SystemCPUUsage, numCores)
  2030  				cs.SystemMode = calculatePercent(
  2031  					s.CPUStats.CPUUsage.UsageInKernelmode, s.PreCPUStats.CPUUsage.UsageInKernelmode,
  2032  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, numCores)
  2033  				cs.UserMode = calculatePercent(
  2034  					s.CPUStats.CPUUsage.UsageInUsermode, s.PreCPUStats.CPUUsage.UsageInUsermode,
  2035  					s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, numCores)
  2036  				cs.TotalTicks = (cs.Percent / 100) * shelpers.TotalTicksAvailable() / float64(numCores)
  2037  
  2038  				h.resourceUsageLock.Lock()
  2039  				h.resourceUsage = &cstructs.TaskResourceUsage{
  2040  					ResourceUsage: &cstructs.ResourceUsage{
  2041  						MemoryStats: ms,
  2042  						CpuStats:    cs,
  2043  					},
  2044  					Timestamp: s.Read.UTC().UnixNano(),
  2045  				}
  2046  				h.resourceUsageLock.Unlock()
  2047  			}
  2048  		case <-h.doneCh:
  2049  			return
  2050  		}
  2051  	}
  2052  }
  2053  
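        // calculatePercent returns the sample counter's share of the total counter
        // over the last interval, scaled by the number of cores, as a percentage.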
  2054  func calculatePercent(newSample, oldSample, newTotal, oldTotal uint64, cores int) float64 {
  2055  	// Compare before subtracting: the counters are unsigned, so a reset
  2056  	// counter would wrap around rather than go negative.
  2057  	if newSample <= oldSample || newTotal <= oldTotal {
  2058  		return 0.0
  2059  	}
  2060  
  2061  	return (float64(newSample-oldSample) / float64(newTotal-oldTotal)) * float64(cores) * 100.0
  2062  }
  2063  
  2064  // loadDockerConfig loads the docker config at the specified path, returning an
  2065  // error if it couldn't be read.
  2066  func loadDockerConfig(file string) (*configfile.ConfigFile, error) {
  2067  	f, err := os.Open(file)
  2068  	if err != nil {
  2069  		return nil, fmt.Errorf("Failed to open auth config file: %v, error: %v", file, err)
  2070  	}
  2071  	defer f.Close()
  2072  
  2073  	cfile := new(configfile.ConfigFile)
  2074  	if err = cfile.LoadFromReader(f); err != nil {
  2075  		return nil, fmt.Errorf("Failed to parse auth config file: %v", err)
  2076  	}
  2077  	return cfile, nil
  2078  }
  2079  
  2080  // parseRepositoryInfo takes a repo and returns the Docker RepositoryInfo. This
  2081  // is useful for interacting with a Docker config object.
  2082  func parseRepositoryInfo(repo string) (*registry.RepositoryInfo, error) {
  2083  	name, err := reference.ParseNamed(repo)
  2084  	if err != nil {
  2085  		return nil, fmt.Errorf("Failed to parse named repo %q: %v", repo, err)
  2086  	}
  2087  
  2088  	repoInfo, err := registry.ParseRepositoryInfo(name)
  2089  	if err != nil {
  2090  		return nil, fmt.Errorf("Failed to parse repository: %v", err)
  2091  	}
  2092  
  2093  	return repoInfo, nil
  2094  }
  2095  
  2096  // firstValidAuth tries a list of auth backends, returning the first error or AuthConfiguration it encounters
  2097  func firstValidAuth(repo string, backends []authBackend) (*docker.AuthConfiguration, error) {
  2098  	for _, backend := range backends {
  2099  		auth, err := backend(repo)
  2100  		if auth != nil || err != nil {
  2101  			return auth, err
  2102  		}
  2103  	}
  2104  	return nil, nil
  2105  }
  2106  
  2107  // authFromTaskConfig generates an authBackend for any auth given in the task-configuration
  2108  func authFromTaskConfig(driverConfig *DockerDriverConfig) authBackend {
  2109  	return func(string) (*docker.AuthConfiguration, error) {
  2110  		if len(driverConfig.Auth) == 0 {
  2111  			return nil, nil
  2112  		}
  2113  		auth := driverConfig.Auth[0]
  2114  		return &docker.AuthConfiguration{
  2115  			Username:      auth.Username,
  2116  			Password:      auth.Password,
  2117  			Email:         auth.Email,
  2118  			ServerAddress: auth.ServerAddress,
  2119  		}, nil
  2120  	}
  2121  }
  2122  
  2123  // authFromDockerConfig generates an authBackend for a dockercfg-compatible file.
  2124  // The auth can come either from explicit auth definitions or via credential
  2125  // helpers.
  2126  func authFromDockerConfig(file string) authBackend {
  2127  	return func(repo string) (*docker.AuthConfiguration, error) {
  2128  		if file == "" {
  2129  			return nil, nil
  2130  		}
  2131  		repoInfo, err := parseRepositoryInfo(repo)
  2132  		if err != nil {
  2133  			return nil, err
  2134  		}
  2135  
  2136  		cfile, err := loadDockerConfig(file)
  2137  		if err != nil {
  2138  			return nil, err
  2139  		}
  2140  
  2141  		return firstValidAuth(repo, []authBackend{
  2142  			func(string) (*docker.AuthConfiguration, error) {
  2143  				dockerAuthConfig := registry.ResolveAuthConfig(cfile.AuthConfigs, repoInfo.Index)
  2144  				auth := &docker.AuthConfiguration{
  2145  					Username:      dockerAuthConfig.Username,
  2146  					Password:      dockerAuthConfig.Password,
  2147  					Email:         dockerAuthConfig.Email,
  2148  					ServerAddress: dockerAuthConfig.ServerAddress,
  2149  				}
  2150  				if authIsEmpty(auth) {
  2151  					return nil, nil
  2152  				}
  2153  				return auth, nil
  2154  			},
  2155  			authFromHelper(cfile.CredentialHelpers[registry.GetAuthConfigKey(repoInfo.Index)]),
  2156  			authFromHelper(cfile.CredentialsStore),
  2157  		})
  2158  	}
  2159  }
  2160  
  2161  // authFromHelper generates an authBackend for a docker credential helper:
  2162  // a script that takes the requested registry domain on stdin and writes JSON
  2163  // containing "Username" and "Secret" to stdout.
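        // The helper binary is invoked with the "get" argument and is expected to
        // print JSON of the form {"Username":"...","Secret":"..."}.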
  2164  func authFromHelper(helperName string) authBackend {
  2165  	return func(repo string) (*docker.AuthConfiguration, error) {
  2166  		if helperName == "" {
  2167  			return nil, nil
  2168  		}
  2169  		helper := dockerAuthHelperPrefix + helperName
  2170  		cmd := exec.Command(helper, "get")
  2171  
  2172  		repoParsed, err := reference.ParseNamed(repo)
  2173  		if err != nil {
  2174  			return nil, err
  2175  		}
  2176  
  2177  		// Ensure that the https:// prefix exists
  2178  		repoAddr := fmt.Sprintf("https://%s", repoParsed.Hostname())
  2179  
  2180  		cmd.Stdin = strings.NewReader(repoAddr)
  2181  		output, err := cmd.Output()
  2182  		if err != nil {
  2183  			switch e := err.(type) {
  2184  			default:
  2185  				return nil, err
  2186  			case *exec.ExitError:
  2187  				return nil, fmt.Errorf("%s with input %q failed with stderr: %s", helper, repo, e.Stderr)
  2188  			}
  2189  		}
  2190  
  2191  		var response map[string]string
  2192  		if err := json.Unmarshal(output, &response); err != nil {
  2193  			return nil, err
  2194  		}
  2195  
  2196  		auth := &docker.AuthConfiguration{
  2197  			Username: response["Username"],
  2198  			Password: response["Secret"],
  2199  		}
  2200  
  2201  		if authIsEmpty(auth) {
  2202  			return nil, nil
  2203  		}
  2204  		return auth, nil
  2205  	}
  2206  }
  2207  
  2208  // authIsEmpty returns true if auth is nil or an empty structure
  2209  func authIsEmpty(auth *docker.AuthConfiguration) bool {
  2210  	if auth == nil {
  2211  		return true
  2212  	}
  2213  	return auth.Username == "" &&
  2214  		auth.Password == "" &&
  2215  		auth.Email == "" &&
  2216  		auth.ServerAddress == ""
  2217  }
  2218  
  2219  // createContainerClient is the subset of Docker Client methods used by the
  2220  // createContainer method to ease testing subtle error conditions.
  2221  type createContainerClient interface {
  2222  	CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
  2223  	InspectContainer(id string) (*docker.Container, error)
  2224  	ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error)
  2225  	RemoveContainer(opts docker.RemoveContainerOptions) error
  2226  }
  2227  
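        // parseDockerImage splits an image reference into repository and tag. The
        // tag defaults to "latest" unless the reference carries a digest, e.g.
        // "redis" yields ("redis", "latest") and "redis:3.2" yields ("redis", "3.2").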
  2228  func parseDockerImage(image string) (repo, tag string) {
  2229  	repo, tag = docker.ParseRepositoryTag(image)
  2230  	if tag != "" {
  2231  		return repo, tag
  2232  	}
  2233  	if i := strings.IndexRune(image, '@'); i > -1 { // Has digest (@sha256:...)
  2234  		// when pulling images with a digest, the repository contains the sha hash, and the tag is empty
  2235  		// see: https://github.com/fsouza/go-dockerclient/blob/master/image_test.go#L471
  2236  		repo = image
  2237  	} else {
  2238  		tag = "latest"
  2239  	}
  2240  	return repo, tag
  2241  }
  2242  
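        // dockerImageRef returns the human-readable "repo:tag" form of an image
        // reference, or just the repo when the tag is empty (digest references).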
  2243  func dockerImageRef(repo string, tag string) string {
  2244  	if tag == "" {
  2245  		return repo
  2246  	}
  2247  	return fmt.Sprintf("%s:%s", repo, tag)
  2248  }