package libcontainerd

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"golang.org/x/net/context"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/sysinfo"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// client is the Windows implementation of the libcontainerd client,
// built on top of the HCS (Host Compute Service) via hcsshim.
type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}

// Win32 error codes that are used for various workarounds
// These really should be ALL_CAPS to match golangs syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"

// Create is the entrypoint to create a container from a spec, and if successfully
// created, start it too. Table below shows the fields required for HCS JSON calling parameters,
// where if not populated, is omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | SandboxPath     |                                            | %root%\windowsfilter                              |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"IsDummy": false,
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
//}
//
// Isolation=Hyper-V example:
//
//{
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IsDummy": false,
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"SandboxPath": "C:\\\\control\\\\windowsfilter",
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
//}
func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    containerID,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: false,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	// Translate the OCI Windows resource limits into HCS configuration fields.
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Percent != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				// OCI expresses the limit in bytes; HCS wants megabytes.
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	// Fold the platform-specific CreateOptions into the HCS configuration.
	var layerOpt *LayerOption
	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			continue
		}
		if f, ok := option.(*FlushOption); ok {
			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
			continue
		}
		if h, ok := option.(*HyperVIsolationOption); ok {
			configuration.HvPartition = h.IsHyperV
			configuration.SandboxPath = h.SandboxPath
			continue
		}
		if l, ok := option.(*LayerOption); ok {
			layerOpt = l
		}
		// NOTE(review): unlike the other cases, the LayerOption branch above has
		// no "continue". Harmless — an option has exactly one concrete type, so
		// the remaining type assertions simply fail — but it is inconsistent.
		if n, ok := option.(*NetworkEndpointsOption); ok {
			configuration.EndpointList = n.Endpoints
			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
			continue
		}
		if c, ok := option.(*CredentialsOption); ok {
			configuration.Credentials = c.Credentials
			continue
		}
	}

	// We must have a layer option with at least one path
	if layerOpt == nil || layerOpt.LayerPaths == nil {
		return fmt.Errorf("no layer option or paths were supplied to the runtime")
	}

	if configuration.HvPartition {
		// Find the upper-most utility VM image, since the utility VM does not
		// use layering in RS1.
		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
		var uvmImagePath string
		for _, path := range layerOpt.LayerPaths {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			// Any stat failure other than "does not exist" is a hard error.
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
	} else {
		// Process-isolated containers mount the sandbox volume directly.
		configuration.VolumePath = spec.Root.Path
	}

	configuration.LayerFolderPath = layerOpt.LayerFolderPath

	// Each layer is identified to HCS by a GUID derived from its folder name.
	for _, layerPath := range layerOpt.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      false,
		}
		for _, o := range mount.Options {
			if strings.ToLower(o) == "ro" {
				mds[i].ReadOnly = true
			}
		}
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
				commandLine: strings.Join(spec.Process.Args, " "),
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our
	// internal structure, start will keep HCS in sync by deleting the
	// container there.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(attachStdio); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil

}

// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec. It returns the system pid of the
// exec'd process.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return -1, err
	}
	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if it we're not -t.
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}
	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
	createProcessParms.User = procToAdd.User.Username

	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return -1, err
	}

	pid := newProcess.Pid()

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return -1, err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		commandLine: createProcessParms.CommandLine,
		hcsProcess:  newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Tell the engine to attach streams back to the client
	if err := attachStdio(*iopipe); err != nil {
		return -1, err
	}

	// Spin up a go routine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return pid, nil
}

// Signal handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (clnt *client) Signal(containerID string, sig int) error {
	var (
		cont *container
		err  error
	)

	// Get the container as we need it to get the container handle.
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	if cont, err = clnt.getContainer(containerID); err != nil {
		return err
	}

	// Flag the stop as deliberate so the exit handler knows it was requested.
	cont.manualStopRequested = true

	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)

	if syscall.Signal(sig) == syscall.SIGKILL {
		// Terminate the compute system
		if err := cont.hcsContainer.Terminate(); err != nil {
			if !hcsshim.IsPending(err) {
				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
			}
		}
	} else {
		// Shut down the container
		if err := cont.hcsContainer.Shutdown(); err != nil {
			if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
				// ignore errors
				logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err)
			}
		}
	}

	return nil
}

// While Linux has support for the full range of signals, signals aren't really implemented on Windows.
// We try to terminate the specified process whatever signal is requested.
func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			return p.hcsProcess.Kill()
		}
	}

	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
}

// Resize handles a CLI event to resize an interactive docker run or docker exec
// window.
432 func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { 433 // Get the libcontainerd container object 434 clnt.lock(containerID) 435 defer clnt.unlock(containerID) 436 cont, err := clnt.getContainer(containerID) 437 if err != nil { 438 return err 439 } 440 441 h, w := uint16(height), uint16(width) 442 443 if processFriendlyName == InitFriendlyName { 444 logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) 445 return cont.process.hcsProcess.ResizeConsole(w, h) 446 } 447 448 for _, p := range cont.processes { 449 if p.friendlyName == processFriendlyName { 450 logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) 451 return p.hcsProcess.ResizeConsole(w, h) 452 } 453 } 454 455 return fmt.Errorf("Resize could not find containerID %s to resize", containerID) 456 457 } 458 459 // Pause handles pause requests for containers 460 func (clnt *client) Pause(containerID string) error { 461 unlockContainer := true 462 // Get the libcontainerd container object 463 clnt.lock(containerID) 464 defer func() { 465 if unlockContainer { 466 clnt.unlock(containerID) 467 } 468 }() 469 container, err := clnt.getContainer(containerID) 470 if err != nil { 471 return err 472 } 473 474 for _, option := range container.options { 475 if h, ok := option.(*HyperVIsolationOption); ok { 476 if !h.IsHyperV { 477 return errors.New("cannot pause Windows Server Containers") 478 } 479 break 480 } 481 } 482 483 err = container.hcsContainer.Pause() 484 if err != nil { 485 return err 486 } 487 488 // Unlock container before calling back into the daemon 489 unlockContainer = false 490 clnt.unlock(containerID) 491 492 return clnt.backend.StateChanged(containerID, StateInfo{ 493 CommonStateInfo: CommonStateInfo{ 494 State: StatePause, 495 }}) 496 } 497 498 // Resume handles resume requests for containers 499 func (clnt *client) Resume(containerID string) error { 500 unlockContainer := true 501 // Get 
the libcontainerd container object 502 clnt.lock(containerID) 503 defer func() { 504 if unlockContainer { 505 clnt.unlock(containerID) 506 } 507 }() 508 container, err := clnt.getContainer(containerID) 509 if err != nil { 510 return err 511 } 512 513 // This should never happen, since Windows Server Containers cannot be paused 514 for _, option := range container.options { 515 if h, ok := option.(*HyperVIsolationOption); ok { 516 if !h.IsHyperV { 517 return errors.New("cannot resume Windows Server Containers") 518 } 519 break 520 } 521 } 522 523 err = container.hcsContainer.Resume() 524 if err != nil { 525 return err 526 } 527 528 // Unlock container before calling back into the daemon 529 unlockContainer = false 530 clnt.unlock(containerID) 531 532 return clnt.backend.StateChanged(containerID, StateInfo{ 533 CommonStateInfo: CommonStateInfo{ 534 State: StateResume, 535 }}) 536 } 537 538 // Stats handles stats requests for containers 539 func (clnt *client) Stats(containerID string) (*Stats, error) { 540 // Get the libcontainerd container object 541 clnt.lock(containerID) 542 defer clnt.unlock(containerID) 543 container, err := clnt.getContainer(containerID) 544 if err != nil { 545 return nil, err 546 } 547 s, err := container.hcsContainer.Statistics() 548 if err != nil { 549 return nil, err 550 } 551 st := Stats(s) 552 return &st, nil 553 } 554 555 // Restore is the handler for restoring a container 556 func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { 557 // TODO Windows: Implement this. For now, just tell the backend the container exited. 558 logrus.Debugf("libcontainerd: Restore(%s)", containerID) 559 return clnt.backend.StateChanged(containerID, StateInfo{ 560 CommonStateInfo: CommonStateInfo{ 561 State: StateExit, 562 ExitCode: 1 << 31, 563 }}) 564 } 565 566 // GetPidsForContainer returns a list of process IDs running in a container. 567 // Although implemented, this is not used in Windows. 
568 func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { 569 var pids []int 570 clnt.lock(containerID) 571 defer clnt.unlock(containerID) 572 cont, err := clnt.getContainer(containerID) 573 if err != nil { 574 return nil, err 575 } 576 577 // Add the first process 578 pids = append(pids, int(cont.containerCommon.systemPid)) 579 // And add all the exec'd processes 580 for _, p := range cont.processes { 581 pids = append(pids, int(p.processCommon.systemPid)) 582 } 583 return pids, nil 584 } 585 586 // Summary returns a summary of the processes running in a container. 587 // This is present in Windows to support docker top. In linux, the 588 // engine shells out to ps to get process information. On Windows, as 589 // the containers could be Hyper-V containers, they would not be 590 // visible on the container host. However, libcontainerd does have 591 // that information. 592 func (clnt *client) Summary(containerID string) ([]Summary, error) { 593 594 // Get the libcontainerd container object 595 clnt.lock(containerID) 596 defer clnt.unlock(containerID) 597 container, err := clnt.getContainer(containerID) 598 if err != nil { 599 return nil, err 600 } 601 p, err := container.hcsContainer.ProcessList() 602 if err != nil { 603 return nil, err 604 } 605 pl := make([]Summary, len(p)) 606 for i := range p { 607 pl[i] = Summary(p[i]) 608 } 609 return pl, nil 610 } 611 612 // UpdateResources updates resources for a running container. 
613 func (clnt *client) UpdateResources(containerID string, resources Resources) error { 614 // Updating resource isn't supported on Windows 615 // but we should return nil for enabling updating container 616 return nil 617 } 618 619 func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { 620 return errors.New("Windows: Containers do not support checkpoints") 621 } 622 623 func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { 624 return errors.New("Windows: Containers do not support checkpoints") 625 } 626 627 func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { 628 return nil, errors.New("Windows: Containers do not support checkpoints") 629 } 630 631 func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { 632 return &ServerVersion{}, nil 633 }