github.com/fabiokung/docker@v0.11.2-0.20170222101415-4534dcd49497/libcontainerd/client_windows.go (about) 1 package libcontainerd 2 3 import ( 4 "errors" 5 "fmt" 6 "io" 7 "io/ioutil" 8 "os" 9 "path/filepath" 10 "strings" 11 "syscall" 12 13 "golang.org/x/net/context" 14 15 "github.com/Microsoft/hcsshim" 16 "github.com/Sirupsen/logrus" 17 "github.com/docker/docker/pkg/sysinfo" 18 specs "github.com/opencontainers/runtime-spec/specs-go" 19 ) 20 21 type client struct { 22 clientCommon 23 24 // Platform specific properties below here (none presently on Windows) 25 } 26 27 // Win32 error codes that are used for various workarounds 28 // These really should be ALL_CAPS to match golangs syscall library and standard 29 // Win32 error conventions, but golint insists on CamelCase. 30 const ( 31 CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string 32 ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started 33 ErrorBadPathname = syscall.Errno(161) // The specified path is invalid 34 ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object 35 ) 36 37 // defaultOwner is a tag passed to HCS to allow it to differentiate between 38 // container creator management stacks. We hard code "docker" in the case 39 // of docker. 40 const defaultOwner = "docker" 41 42 // Create is the entrypoint to create a container from a spec, and if successfully 43 // created, start it too. Table below shows the fields required for HCS JSON calling parameters, 44 // where if not populated, is omitted. 45 // +-----------------+--------------------------------------------+---------------------------------------------------+ 46 // | | Isolation=Process | Isolation=Hyper-V | 47 // +-----------------+--------------------------------------------+---------------------------------------------------+ 48 // | VolumePath | \\?\\Volume{GUIDa} | | 49 // | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | 50 // | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | 51 // | SandboxPath | | %root%\windowsfilter | 52 // | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | 53 // +-----------------+--------------------------------------------+---------------------------------------------------+ 54 // 55 // Isolation=Process example: 56 // 57 // { 58 // "SystemType": "Container", 59 // "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", 60 // "Owner": "docker", 61 // "IsDummy": false, 62 // "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", 63 // "IgnoreFlushesDuringBoot": true, 64 // "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", 65 // "Layers": [{ 66 // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", 67 // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" 68 // }], 69 // "HostName": "5e0055c814a6", 70 // "MappedDirectories": [], 71 // "HvPartition": false, 72 // "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], 73 // "Servicing": false 74 //} 75 // 76 // Isolation=Hyper-V example: 77 // 78 //{ 79 // "SystemType": "Container", 80 // "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", 81 // "Owner": "docker", 82 // "IsDummy": false, 83 // "IgnoreFlushesDuringBoot": true, 84 // "Layers": [{ 85 // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", 86 // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" 87 // }], 88 // "HostName": "475c2c58933b", 89 // "MappedDirectories": [], 90 // "SandboxPath": "C:\\\\control\\\\windowsfilter", 91 // "HvPartition": true, 92 // "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], 93 // "DNSSearchList": "a.com,b.com,c.com", 94 // "HvRuntime": { 95 // "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" 96 // }, 97 // "Servicing": false 98 //} 99 func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { 100 clnt.lock(containerID) 101 defer clnt.unlock(containerID) 102 logrus.Debugln("libcontainerd: client.Create() with spec", spec) 103 104 configuration := &hcsshim.ContainerConfig{ 105 SystemType: "Container", 106 Name: containerID, 107 Owner: defaultOwner, 108 IgnoreFlushesDuringBoot: false, 109 HostName: spec.Hostname, 110 HvPartition: false, 111 } 112 113 if spec.Windows.Resources != nil { 114 if spec.Windows.Resources.CPU != nil { 115 if spec.Windows.Resources.CPU.Count != nil { 116 // This check is being done here rather than in adaptContainerSettings 117 // because we don't want to update the HostConfig in case this container 118 // is moved to a host with more CPUs than this one. 119 cpuCount := *spec.Windows.Resources.CPU.Count 120 hostCPUCount := uint64(sysinfo.NumCPU()) 121 if cpuCount > hostCPUCount { 122 logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) 123 cpuCount = hostCPUCount 124 } 125 configuration.ProcessorCount = uint32(cpuCount) 126 } 127 if spec.Windows.Resources.CPU.Shares != nil { 128 configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) 129 } 130 if spec.Windows.Resources.CPU.Percent != nil { 131 configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 132 } 133 } 134 if spec.Windows.Resources.Memory != nil { 135 if spec.Windows.Resources.Memory.Limit != nil { 136 configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 137 } 138 } 139 if spec.Windows.Resources.Storage != nil { 140 if spec.Windows.Resources.Storage.Bps != nil { 141 configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps 142 } 143 if spec.Windows.Resources.Storage.Iops != nil { 144 configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops 145 } 146 } 147 } 148 149 var layerOpt *LayerOption 150 for _, option := range options { 151 if s, ok := option.(*ServicingOption); ok { 152 configuration.Servicing = s.IsServicing 153 continue 154 } 155 if f, ok := option.(*FlushOption); ok { 156 configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot 157 continue 158 } 159 if h, ok := option.(*HyperVIsolationOption); ok { 160 configuration.HvPartition = h.IsHyperV 161 configuration.SandboxPath = h.SandboxPath 162 continue 163 } 164 if l, ok := option.(*LayerOption); ok { 165 layerOpt = l 166 } 167 if n, ok := option.(*NetworkEndpointsOption); ok { 168 configuration.EndpointList = n.Endpoints 169 configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery 170 if n.DNSSearchList != nil { 171 configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",") 172 } 173 continue 174 } 175 if c, ok := option.(*CredentialsOption); ok { 176 configuration.Credentials = c.Credentials 177 continue 178 } 179 } 180 181 // We must have a layer option with at least one path 182 if layerOpt == nil || layerOpt.LayerPaths == nil { 183 return fmt.Errorf("no layer option or paths were supplied to the runtime") 184 } 185 186 if configuration.HvPartition { 187 // Find the upper-most utility VM image, since the utility VM does not 188 // use layering in RS1. 189 // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. 190 var uvmImagePath string 191 for _, path := range layerOpt.LayerPaths { 192 fullPath := filepath.Join(path, "UtilityVM") 193 _, err := os.Stat(fullPath) 194 if err == nil { 195 uvmImagePath = fullPath 196 break 197 } 198 if !os.IsNotExist(err) { 199 return err 200 } 201 } 202 if uvmImagePath == "" { 203 return errors.New("utility VM image could not be found") 204 } 205 configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} 206 } else { 207 configuration.VolumePath = spec.Root.Path 208 } 209 210 configuration.LayerFolderPath = layerOpt.LayerFolderPath 211 212 for _, layerPath := range layerOpt.LayerPaths { 213 _, filename := filepath.Split(layerPath) 214 g, err := hcsshim.NameToGuid(filename) 215 if err != nil { 216 return err 217 } 218 configuration.Layers = append(configuration.Layers, hcsshim.Layer{ 219 ID: g.ToString(), 220 Path: layerPath, 221 }) 222 } 223 224 // Add the mounts (volumes, bind mounts etc) to the structure 225 mds := make([]hcsshim.MappedDir, len(spec.Mounts)) 226 for i, mount := range spec.Mounts { 227 mds[i] = hcsshim.MappedDir{ 228 HostPath: mount.Source, 229 ContainerPath: mount.Destination, 230 ReadOnly: false, 231 } 232 for _, o := range mount.Options { 233 if strings.ToLower(o) == "ro" { 234 mds[i].ReadOnly = true 235 } 236 } 237 } 238 configuration.MappedDirectories = mds 239 240 hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) 241 if err != nil { 242 return err 243 } 244 245 // Construct a container object for calling start on it. 246 container := &container{ 247 containerCommon: containerCommon{ 248 process: process{ 249 processCommon: processCommon{ 250 containerID: containerID, 251 client: clnt, 252 friendlyName: InitFriendlyName, 253 }, 254 }, 255 processes: make(map[string]*process), 256 }, 257 ociSpec: spec, 258 hcsContainer: hcsContainer, 259 } 260 261 container.options = options 262 for _, option := range options { 263 if err := option.Apply(container); err != nil { 264 logrus.Errorf("libcontainerd: %v", err) 265 } 266 } 267 268 // Call start, and if it fails, delete the container from our 269 // internal structure, start will keep HCS in sync by deleting the 270 // container there. 271 logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) 272 if err := container.start(attachStdio); err != nil { 273 clnt.deleteContainer(containerID) 274 return err 275 } 276 277 logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) 278 return nil 279 280 } 281 282 // AddProcess is the handler for adding a process to an already running 283 // container. It's called through docker exec. It returns the system pid of the 284 // exec'd process. 285 func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { 286 clnt.lock(containerID) 287 defer clnt.unlock(containerID) 288 container, err := clnt.getContainer(containerID) 289 if err != nil { 290 return -1, err 291 } 292 // Note we always tell HCS to 293 // create stdout as it's required regardless of '-i' or '-t' options, so that 294 // docker can always grab the output through logs. We also tell HCS to always 295 // create stdin, even if it's not used - it will be closed shortly. Stderr 296 // is only created if it we're not -t. 297 createProcessParms := hcsshim.ProcessConfig{ 298 EmulateConsole: procToAdd.Terminal, 299 CreateStdInPipe: true, 300 CreateStdOutPipe: true, 301 CreateStdErrPipe: !procToAdd.Terminal, 302 } 303 createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) 304 createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) 305 306 // Take working directory from the process to add if it is defined, 307 // otherwise take from the first process. 308 if procToAdd.Cwd != "" { 309 createProcessParms.WorkingDirectory = procToAdd.Cwd 310 } else { 311 createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd 312 } 313 314 // Configure the environment for the process 315 createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) 316 createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") 317 createProcessParms.User = procToAdd.User.Username 318 319 logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) 320 321 // Start the command running in the container. 322 var stdout, stderr io.ReadCloser 323 var stdin io.WriteCloser 324 newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) 325 if err != nil { 326 logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) 327 return -1, err 328 } 329 330 pid := newProcess.Pid() 331 332 stdin, stdout, stderr, err = newProcess.Stdio() 333 if err != nil { 334 logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) 335 return -1, err 336 } 337 338 iopipe := &IOPipe{Terminal: procToAdd.Terminal} 339 iopipe.Stdin = createStdInCloser(stdin, newProcess) 340 341 // Convert io.ReadClosers to io.Readers 342 if stdout != nil { 343 iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) 344 } 345 if stderr != nil { 346 iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) 347 } 348 349 proc := &process{ 350 processCommon: processCommon{ 351 containerID: containerID, 352 friendlyName: processFriendlyName, 353 client: clnt, 354 systemPid: uint32(pid), 355 }, 356 hcsProcess: newProcess, 357 } 358 359 // Add the process to the container's list of processes 360 container.processes[processFriendlyName] = proc 361 362 // Tell the engine to attach streams back to the client 363 if err := attachStdio(*iopipe); err != nil { 364 return -1, err 365 } 366 367 // Spin up a go routine waiting for exit to handle cleanup 368 go container.waitExit(proc, false) 369 370 return pid, nil 371 } 372 373 // Signal handles `docker stop` on Windows. While Linux has support for 374 // the full range of signals, signals aren't really implemented on Windows. 375 // We fake supporting regular stop and -9 to force kill. 376 func (clnt *client) Signal(containerID string, sig int) error { 377 var ( 378 cont *container 379 err error 380 ) 381 382 // Get the container as we need it to get the container handle. 383 clnt.lock(containerID) 384 defer clnt.unlock(containerID) 385 if cont, err = clnt.getContainer(containerID); err != nil { 386 return err 387 } 388 389 cont.manualStopRequested = true 390 391 logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) 392 393 if syscall.Signal(sig) == syscall.SIGKILL { 394 // Terminate the compute system 395 if err := cont.hcsContainer.Terminate(); err != nil { 396 if !hcsshim.IsPending(err) { 397 logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) 398 } 399 } 400 } else { 401 // Shut down the container 402 if err := cont.hcsContainer.Shutdown(); err != nil { 403 if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) { 404 // ignore errors 405 logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err) 406 } 407 } 408 } 409 410 return nil 411 } 412 413 // While Linux has support for the full range of signals, signals aren't really implemented on Windows. 414 // We try to terminate the specified process whatever signal is requested. 415 func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { 416 clnt.lock(containerID) 417 defer clnt.unlock(containerID) 418 cont, err := clnt.getContainer(containerID) 419 if err != nil { 420 return err 421 } 422 423 for _, p := range cont.processes { 424 if p.friendlyName == processFriendlyName { 425 return p.hcsProcess.Kill() 426 } 427 } 428 429 return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) 430 } 431 432 // Resize handles a CLI event to resize an interactive docker run or docker exec 433 // window. 434 func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { 435 // Get the libcontainerd container object 436 clnt.lock(containerID) 437 defer clnt.unlock(containerID) 438 cont, err := clnt.getContainer(containerID) 439 if err != nil { 440 return err 441 } 442 443 h, w := uint16(height), uint16(width) 444 445 if processFriendlyName == InitFriendlyName { 446 logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) 447 return cont.process.hcsProcess.ResizeConsole(w, h) 448 } 449 450 for _, p := range cont.processes { 451 if p.friendlyName == processFriendlyName { 452 logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) 453 return p.hcsProcess.ResizeConsole(w, h) 454 } 455 } 456 457 return fmt.Errorf("Resize could not find containerID %s to resize", containerID) 458 459 } 460 461 // Pause handles pause requests for containers 462 func (clnt *client) Pause(containerID string) error { 463 unlockContainer := true 464 // Get the libcontainerd container object 465 clnt.lock(containerID) 466 defer func() { 467 if unlockContainer { 468 clnt.unlock(containerID) 469 } 470 }() 471 container, err := clnt.getContainer(containerID) 472 if err != nil { 473 return err 474 } 475 476 for _, option := range container.options { 477 if h, ok := option.(*HyperVIsolationOption); ok { 478 if !h.IsHyperV { 479 return errors.New("cannot pause Windows Server Containers") 480 } 481 break 482 } 483 } 484 485 err = container.hcsContainer.Pause() 486 if err != nil { 487 return err 488 } 489 490 // Unlock container before calling back into the daemon 491 unlockContainer = false 492 clnt.unlock(containerID) 493 494 return clnt.backend.StateChanged(containerID, StateInfo{ 495 CommonStateInfo: CommonStateInfo{ 496 State: StatePause, 497 }}) 498 } 499 500 // Resume handles resume requests for containers 501 func (clnt *client) Resume(containerID string) error { 502 unlockContainer := true 503 // Get the libcontainerd container object 504 clnt.lock(containerID) 505 defer func() { 506 if unlockContainer { 507 clnt.unlock(containerID) 508 } 509 }() 510 container, err := clnt.getContainer(containerID) 511 if err != nil { 512 return err 513 } 514 515 // This should never happen, since Windows Server Containers cannot be paused 516 for _, option := range container.options { 517 if h, ok := option.(*HyperVIsolationOption); ok { 518 if !h.IsHyperV { 519 return errors.New("cannot resume Windows Server Containers") 520 } 521 break 522 } 523 } 524 525 err = container.hcsContainer.Resume() 526 if err != nil { 527 return err 528 } 529 530 // Unlock container before calling back into the daemon 531 unlockContainer = false 532 clnt.unlock(containerID) 533 534 return clnt.backend.StateChanged(containerID, StateInfo{ 535 CommonStateInfo: CommonStateInfo{ 536 State: StateResume, 537 }}) 538 } 539 540 // Stats handles stats requests for containers 541 func (clnt *client) Stats(containerID string) (*Stats, error) { 542 // Get the libcontainerd container object 543 clnt.lock(containerID) 544 defer clnt.unlock(containerID) 545 container, err := clnt.getContainer(containerID) 546 if err != nil { 547 return nil, err 548 } 549 s, err := container.hcsContainer.Statistics() 550 if err != nil { 551 return nil, err 552 } 553 st := Stats(s) 554 return &st, nil 555 } 556 557 // Restore is the handler for restoring a container 558 func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { 559 // TODO Windows: Implement this. For now, just tell the backend the container exited. 560 logrus.Debugf("libcontainerd: Restore(%s)", containerID) 561 return clnt.backend.StateChanged(containerID, StateInfo{ 562 CommonStateInfo: CommonStateInfo{ 563 State: StateExit, 564 ExitCode: 1 << 31, 565 }}) 566 } 567 568 // GetPidsForContainer returns a list of process IDs running in a container. 569 // Not used on Windows. 570 func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { 571 return nil, errors.New("not implemented on Windows") 572 } 573 574 // Summary returns a summary of the processes running in a container. 575 // This is present in Windows to support docker top. In linux, the 576 // engine shells out to ps to get process information. On Windows, as 577 // the containers could be Hyper-V containers, they would not be 578 // visible on the container host. However, libcontainerd does have 579 // that information. 580 func (clnt *client) Summary(containerID string) ([]Summary, error) { 581 582 // Get the libcontainerd container object 583 clnt.lock(containerID) 584 defer clnt.unlock(containerID) 585 container, err := clnt.getContainer(containerID) 586 if err != nil { 587 return nil, err 588 } 589 p, err := container.hcsContainer.ProcessList() 590 if err != nil { 591 return nil, err 592 } 593 pl := make([]Summary, len(p)) 594 for i := range p { 595 pl[i] = Summary(p[i]) 596 } 597 return pl, nil 598 } 599 600 // UpdateResources updates resources for a running container. 601 func (clnt *client) UpdateResources(containerID string, resources Resources) error { 602 // Updating resource isn't supported on Windows 603 // but we should return nil for enabling updating container 604 return nil 605 } 606 607 func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { 608 return errors.New("Windows: Containers do not support checkpoints") 609 } 610 611 func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { 612 return errors.New("Windows: Containers do not support checkpoints") 613 } 614 615 func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { 616 return nil, errors.New("Windows: Containers do not support checkpoints") 617 } 618 619 func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { 620 return &ServerVersion{}, nil 621 }