github.com/gunjan5/docker@v1.8.2/daemon/execdriver/windows/run.go

// +build windows

package windows

// Note this is alpha code for the bring-up of containers on Windows.

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/microsoft/hcsshim"
	"github.com/natefinch/npipe"
)

type layer struct {
	Id   string
	Path string
}

type defConfig struct {
	DefFile string
}

type networkConnection struct {
	NetworkName string
	EnableNat   bool
}

type networkSettings struct {
	MacAddress string
}

type device struct {
	DeviceType string
	Connection interface{}
	Settings   interface{}
}

type containerInit struct {
	SystemType              string
	Name                    string
	IsDummy                 bool
	VolumePath              string
	Devices                 []device
	IgnoreFlushesDuringBoot bool
	LayerFolderPath         string
	Layers                  []layer
}

func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {

	var (
		term                           execdriver.Terminal
		err                            error
		inListen, outListen, errListen *npipe.PipeListener
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
	}

	for i := 0; i < len(c.LayerPaths); i++ {
		cu.Layers = append(cu.Layers, layer{
			Id:   hcsshim.NewGUID(c.LayerPaths[i]).ToString(),
			Path: c.LayerPaths[i],
		})
	}

	if c.Network.Interface != nil {
		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				EnableNat:   false,
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}

		logrus.Debugf("Virtual switch '%s', mac='%s'", c.Network.Interface.Bridge, c.Network.Interface.MacAddress)

		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if terminateMode {
			logrus.Debugf("Terminating container %s", c.ID)
			if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could
				// already have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could
				// already have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
			}
		}
	}()

	// We use a different pipe name between real and dummy mode in the HCS
	var serverPipeFormat, clientPipeFormat string
	if dummyMode {
		clientPipeFormat = `\\.\pipe\docker-run-%[1]s-%[2]s`
		serverPipeFormat = clientPipeFormat
	} else {
		clientPipeFormat = `\\.\pipe\docker-run-%[2]s`
		serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-run-%[2]s`
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// Connect stdin
	if pipes.Stdin != nil {
		stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdin")
		createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdin")

		// Listen on the named pipe
		inListen, err = npipe.Listen(stdInPipe)
		if err != nil {
			logrus.Errorf("stdin failed to listen on %s err=%s", stdInPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer inListen.Close()

		// Launch a goroutine to do the accept. We do this so that we can
		// cause an otherwise blocking goroutine to gracefully close when
		// the caller (us) closes the listener
		go stdinAccept(inListen, stdInPipe, pipes.Stdin)
	}

	// Connect stdout
	stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdout")
	createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdout")

	outListen, err = npipe.Listen(stdOutPipe)
	if err != nil {
		logrus.Errorf("stdout failed to listen on %s err=%s", stdOutPipe, err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer outListen.Close()
	go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout)

	// No stderr on TTY.
	if !c.ProcessConfig.Tty {
		// Connect stderr
		stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stderr")
		createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stderr")
		errListen, err = npipe.Listen(stdErrPipe)
		if err != nil {
			logrus.Errorf("stderr failed to listen on %s err=%s", stdErrPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer errListen.Close()
		go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr)
	}

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var pid uint32
	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}