github.com/hanks177/podman/v4@v4.1.3-0.20220613032544-16d90015bc83/pkg/specgen/generate/container.go (about) 1 package generate 2 3 import ( 4 "context" 5 "encoding/json" 6 "fmt" 7 "os" 8 "strings" 9 "time" 10 11 "github.com/containers/common/libimage" 12 "github.com/containers/common/pkg/config" 13 "github.com/hanks177/podman/v4/libpod" 14 "github.com/hanks177/podman/v4/libpod/define" 15 ann "github.com/hanks177/podman/v4/pkg/annotations" 16 envLib "github.com/hanks177/podman/v4/pkg/env" 17 "github.com/hanks177/podman/v4/pkg/signal" 18 "github.com/hanks177/podman/v4/pkg/specgen" 19 spec "github.com/opencontainers/runtime-spec/specs-go" 20 "github.com/pkg/errors" 21 "github.com/sirupsen/logrus" 22 "golang.org/x/sys/unix" 23 ) 24 25 func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) (*libimage.Image, string, *libimage.ImageData, error) { 26 if s.Image == "" || s.Rootfs != "" { 27 return nil, "", nil, nil 28 } 29 30 // Image may already have been set in the generator. 31 image, resolvedName := s.GetImage() 32 if image != nil { 33 inspectData, err := image.Inspect(ctx, nil) 34 if err != nil { 35 return nil, "", nil, err 36 } 37 return image, resolvedName, inspectData, nil 38 } 39 40 // Need to look up image. 41 image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, nil) 42 if err != nil { 43 return nil, "", nil, err 44 } 45 s.SetImage(image, resolvedName) 46 inspectData, err := image.Inspect(ctx, nil) 47 if err != nil { 48 return nil, "", nil, err 49 } 50 return image, resolvedName, inspectData, err 51 } 52 53 // Fill any missing parts of the spec generator (e.g. from the image). 54 // Returns a set of warnings or any fatal error that occurred. 
55 func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) ([]string, error) { 56 // Only add image configuration if we have an image 57 newImage, _, inspectData, err := getImageFromSpec(ctx, r, s) 58 if err != nil { 59 return nil, err 60 } 61 if inspectData != nil { 62 inspectData, err = newImage.Inspect(ctx, nil) 63 if err != nil { 64 return nil, err 65 } 66 67 if s.HealthConfig == nil { 68 // NOTE: the health check is only set for Docker images 69 // but inspect will take care of it. 70 s.HealthConfig = inspectData.HealthCheck 71 if s.HealthConfig != nil { 72 if s.HealthConfig.Timeout == 0 { 73 hct, err := time.ParseDuration(define.DefaultHealthCheckTimeout) 74 if err != nil { 75 return nil, err 76 } 77 s.HealthConfig.Timeout = hct 78 } 79 if s.HealthConfig.Interval == 0 { 80 hct, err := time.ParseDuration(define.DefaultHealthCheckInterval) 81 if err != nil { 82 return nil, err 83 } 84 s.HealthConfig.Interval = hct 85 } 86 } 87 } 88 89 // Image stop signal 90 if s.StopSignal == nil { 91 if inspectData.Config.StopSignal != "" { 92 sig, err := signal.ParseSignalNameOrNumber(inspectData.Config.StopSignal) 93 if err != nil { 94 return nil, err 95 } 96 s.StopSignal = &sig 97 } 98 } 99 } 100 101 rtc, err := r.GetConfigNoCopy() 102 if err != nil { 103 return nil, err 104 } 105 106 // Get Default Environment from containers.conf 107 defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(s.EnvHost, s.HTTPProxy)) 108 if err != nil { 109 return nil, errors.Wrap(err, "error parsing fields in containers.conf") 110 } 111 var envs map[string]string 112 113 // Image Environment defaults 114 if inspectData != nil { 115 // Image envs from the image if they don't exist 116 // already, overriding the default environments 117 envs, err = envLib.ParseSlice(inspectData.Config.Env) 118 if err != nil { 119 return nil, errors.Wrap(err, "Env fields from image failed to parse") 120 } 121 defaultEnvs = envLib.Join(envLib.DefaultEnvVariables(), 
envLib.Join(defaultEnvs, envs)) 122 } 123 124 for _, e := range s.UnsetEnv { 125 delete(defaultEnvs, e) 126 } 127 128 if s.UnsetEnvAll { 129 defaultEnvs = make(map[string]string) 130 } 131 // First transform the os env into a map. We need it for the labels later in 132 // any case. 133 osEnv, err := envLib.ParseSlice(os.Environ()) 134 if err != nil { 135 return nil, errors.Wrap(err, "error parsing host environment variables") 136 } 137 // Caller Specified defaults 138 if s.EnvHost { 139 defaultEnvs = envLib.Join(defaultEnvs, osEnv) 140 } else if s.HTTPProxy { 141 for _, envSpec := range config.ProxyEnv { 142 if v, ok := osEnv[envSpec]; ok { 143 defaultEnvs[envSpec] = v 144 } 145 } 146 } 147 148 s.Env = envLib.Join(defaultEnvs, s.Env) 149 150 // Labels and Annotations 151 annotations := make(map[string]string) 152 if newImage != nil { 153 labels, err := newImage.Labels(ctx) 154 if err != nil { 155 return nil, err 156 } 157 158 // labels from the image that don't exist already 159 if len(labels) > 0 && s.Labels == nil { 160 s.Labels = make(map[string]string) 161 } 162 for k, v := range labels { 163 if _, exists := s.Labels[k]; !exists { 164 s.Labels[k] = v 165 } 166 } 167 168 // Add annotations from the image 169 for k, v := range inspectData.Annotations { 170 if !define.IsReservedAnnotation(k) { 171 annotations[k] = v 172 } 173 } 174 } 175 176 // in the event this container is in a pod, and the pod has an infra container 177 // we will want to configure it as a type "container" instead defaulting to 178 // the behavior of a "sandbox" container 179 // In Kata containers: 180 // - "sandbox" is the annotation that denotes the container should use its own 181 // VM, which is the default behavior 182 // - "container" denotes the container should join the VM of the SandboxID 183 // (the infra container) 184 if len(s.Pod) > 0 { 185 annotations[ann.SandboxID] = s.Pod 186 annotations[ann.ContainerType] = ann.ContainerTypeContainer 187 // Check if this is an init-ctr and if 
so, check if 188 // the pod is running. we do not want to add init-ctrs to 189 // a running pod because it creates confusion for us. 190 if len(s.InitContainerType) > 0 { 191 p, err := r.LookupPod(s.Pod) 192 if err != nil { 193 return nil, err 194 } 195 containerStatuses, err := p.Status() 196 if err != nil { 197 return nil, err 198 } 199 // If any one of the containers is running, the pod is considered to be 200 // running 201 for _, con := range containerStatuses { 202 if con == define.ContainerStateRunning { 203 return nil, errors.New("cannot add init-ctr to a running pod") 204 } 205 } 206 } 207 } 208 209 for _, v := range rtc.Containers.Annotations { 210 split := strings.SplitN(v, "=", 2) 211 k := split[0] 212 v := "" 213 if len(split) == 2 { 214 v = split[1] 215 } 216 annotations[k] = v 217 } 218 // now pass in the values from client 219 for k, v := range s.Annotations { 220 annotations[k] = v 221 } 222 s.Annotations = annotations 223 224 if len(s.SeccompProfilePath) < 1 { 225 p, err := libpod.DefaultSeccompPath() 226 if err != nil { 227 return nil, err 228 } 229 s.SeccompProfilePath = p 230 } 231 232 if len(s.User) == 0 && inspectData != nil { 233 s.User = inspectData.Config.User 234 } 235 // Unless already set via the CLI, check if we need to disable process 236 // labels or set the defaults. 
237 if len(s.SelinuxOpts) == 0 { 238 if err := setLabelOpts(s, r, s.PidNS, s.IpcNS); err != nil { 239 return nil, err 240 } 241 } 242 243 if s.CgroupsMode == "" { 244 s.CgroupsMode = rtc.Cgroups() 245 } 246 247 // If caller did not specify Pids Limits load default 248 if s.ResourceLimits == nil || s.ResourceLimits.Pids == nil { 249 if s.CgroupsMode != "disabled" { 250 limit := rtc.PidsLimit() 251 if limit != 0 { 252 if s.ResourceLimits == nil { 253 s.ResourceLimits = &spec.LinuxResources{} 254 } 255 s.ResourceLimits.Pids = &spec.LinuxPids{ 256 Limit: limit, 257 } 258 } 259 } 260 } 261 262 if s.LogConfiguration == nil { 263 s.LogConfiguration = &specgen.LogConfig{} 264 } 265 // set log-driver from common if not already set 266 if len(s.LogConfiguration.Driver) < 1 { 267 s.LogConfiguration.Driver = rtc.Containers.LogDriver 268 } 269 if len(rtc.Containers.LogTag) > 0 { 270 if s.LogConfiguration.Driver != define.JSONLogging { 271 if s.LogConfiguration.Options == nil { 272 s.LogConfiguration.Options = make(map[string]string) 273 } 274 275 s.LogConfiguration.Options["tag"] = rtc.Containers.LogTag 276 } else { 277 logrus.Warnf("log_tag %q is not allowed with %q log_driver", rtc.Containers.LogTag, define.JSONLogging) 278 } 279 } 280 281 warnings, err := verifyContainerResources(s) 282 if err != nil { 283 return warnings, err 284 } 285 286 // Warn on net=host/container/pod/none and port mappings. 287 if (s.NetNS.NSMode == specgen.Host || s.NetNS.NSMode == specgen.FromContainer || 288 s.NetNS.NSMode == specgen.FromPod || s.NetNS.NSMode == specgen.NoNetwork) && 289 len(s.PortMappings) > 0 { 290 warnings = append(warnings, "Port mappings have been discarded as one of the Host, Container, Pod, and None network modes are in use") 291 } 292 293 return warnings, nil 294 } 295 296 // FinishThrottleDevices takes the temporary representation of the throttle 297 // devices in the specgen and looks up the major and major minors. 
it then 298 // sets the throttle devices proper in the specgen 299 func FinishThrottleDevices(s *specgen.SpecGenerator) error { 300 if bps := s.ThrottleReadBpsDevice; len(bps) > 0 { 301 for k, v := range bps { 302 statT := unix.Stat_t{} 303 if err := unix.Stat(k, &statT); err != nil { 304 return err 305 } 306 v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert 307 v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert 308 if s.ResourceLimits.BlockIO == nil { 309 s.ResourceLimits.BlockIO = new(spec.LinuxBlockIO) 310 } 311 s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v) 312 } 313 } 314 if bps := s.ThrottleWriteBpsDevice; len(bps) > 0 { 315 for k, v := range bps { 316 statT := unix.Stat_t{} 317 if err := unix.Stat(k, &statT); err != nil { 318 return err 319 } 320 v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert 321 v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert 322 s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v) 323 } 324 } 325 if iops := s.ThrottleReadIOPSDevice; len(iops) > 0 { 326 for k, v := range iops { 327 statT := unix.Stat_t{} 328 if err := unix.Stat(k, &statT); err != nil { 329 return err 330 } 331 v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert 332 v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert 333 s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v) 334 } 335 } 336 if iops := s.ThrottleWriteIOPSDevice; len(iops) > 0 { 337 for k, v := range iops { 338 statT := unix.Stat_t{} 339 if err := unix.Stat(k, &statT); err != nil { 340 return err 341 } 342 v.Major = (int64(unix.Major(uint64(statT.Rdev)))) // nolint: unconvert 343 v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) // nolint: unconvert 344 s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = 
append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v) 345 } 346 } 347 return nil 348 } 349 350 // ConfigToSpec takes a completed container config and converts it back into a specgenerator for purposes of cloning an existing container 351 func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID string) (*libpod.Container, *libpod.InfraInherit, error) { 352 c, err := rt.LookupContainer(contaierID) 353 if err != nil { 354 return nil, nil, err 355 } 356 conf := c.ConfigWithNetworks() 357 if conf == nil { 358 return nil, nil, fmt.Errorf("failed to get config for container %s", c.ID()) 359 } 360 361 tmpSystemd := conf.Systemd 362 tmpMounts := conf.Mounts 363 364 conf.Systemd = nil 365 conf.Mounts = []string{} 366 367 if specg == nil { 368 specg = &specgen.SpecGenerator{} 369 } 370 371 specg.Pod = conf.Pod 372 373 matching, err := json.Marshal(conf) 374 if err != nil { 375 return nil, nil, err 376 } 377 378 err = json.Unmarshal(matching, specg) 379 if err != nil { 380 return nil, nil, err 381 } 382 383 conf.Systemd = tmpSystemd 384 conf.Mounts = tmpMounts 385 386 if conf.Spec != nil && conf.Spec.Linux != nil && conf.Spec.Linux.Resources != nil { 387 if specg.ResourceLimits == nil { 388 specg.ResourceLimits = conf.Spec.Linux.Resources 389 } 390 } 391 392 nameSpaces := []string{"pid", "net", "cgroup", "ipc", "uts", "user"} 393 containers := []string{conf.PIDNsCtr, conf.NetNsCtr, conf.CgroupNsCtr, conf.IPCNsCtr, conf.UTSNsCtr, conf.UserNsCtr} 394 place := []*specgen.Namespace{&specg.PidNS, &specg.NetNS, &specg.CgroupNS, &specg.IpcNS, &specg.UtsNS, &specg.UserNS} 395 for i, ns := range containers { 396 if len(ns) > 0 { 397 ns := specgen.Namespace{NSMode: specgen.FromContainer, Value: ns} 398 place[i] = &ns 399 } else { 400 switch nameSpaces[i] { 401 case "pid": 402 specg.PidNS = specgen.Namespace{NSMode: specgen.Default} // default 403 case "net": 404 switch { 405 case conf.NetMode.IsBridge(): 406 toExpose := make(map[uint16]string, 
len(conf.ExposedPorts)) 407 for _, expose := range []map[uint16][]string{conf.ExposedPorts} { 408 for port, proto := range expose { 409 toExpose[port] = strings.Join(proto, ",") 410 } 411 } 412 specg.Expose = toExpose 413 specg.PortMappings = conf.PortMappings 414 specg.NetNS = specgen.Namespace{NSMode: specgen.Bridge} 415 case conf.NetMode.IsSlirp4netns(): 416 toExpose := make(map[uint16]string, len(conf.ExposedPorts)) 417 for _, expose := range []map[uint16][]string{conf.ExposedPorts} { 418 for port, proto := range expose { 419 toExpose[port] = strings.Join(proto, ",") 420 } 421 } 422 specg.Expose = toExpose 423 specg.PortMappings = conf.PortMappings 424 netMode := strings.Split(string(conf.NetMode), ":") 425 var val string 426 if len(netMode) > 1 { 427 val = netMode[1] 428 } 429 specg.NetNS = specgen.Namespace{NSMode: specgen.Slirp, Value: val} 430 case conf.NetMode.IsPrivate(): 431 specg.NetNS = specgen.Namespace{NSMode: specgen.Private} 432 case conf.NetMode.IsDefault(): 433 specg.NetNS = specgen.Namespace{NSMode: specgen.Default} 434 case conf.NetMode.IsUserDefined(): 435 specg.NetNS = specgen.Namespace{NSMode: specgen.Path, Value: strings.Split(string(conf.NetMode), ":")[1]} 436 case conf.NetMode.IsContainer(): 437 specg.NetNS = specgen.Namespace{NSMode: specgen.FromContainer, Value: strings.Split(string(conf.NetMode), ":")[1]} 438 case conf.NetMode.IsPod(): 439 specg.NetNS = specgen.Namespace{NSMode: specgen.FromPod, Value: strings.Split(string(conf.NetMode), ":")[1]} 440 } 441 case "cgroup": 442 specg.CgroupNS = specgen.Namespace{NSMode: specgen.Default} // default 443 case "ipc": 444 switch conf.ShmDir { 445 case "/dev/shm": 446 specg.IpcNS = specgen.Namespace{NSMode: specgen.Host} 447 case "": 448 specg.IpcNS = specgen.Namespace{NSMode: specgen.None} 449 default: 450 specg.IpcNS = specgen.Namespace{NSMode: specgen.Default} // default 451 } 452 case "uts": 453 specg.UtsNS = specgen.Namespace{NSMode: specgen.Default} // default 454 case "user": 455 if 
conf.AddCurrentUserPasswdEntry { 456 specg.UserNS = specgen.Namespace{NSMode: specgen.KeepID} 457 } else { 458 specg.UserNS = specgen.Namespace{NSMode: specgen.Default} // default 459 } 460 } 461 } 462 } 463 464 specg.IDMappings = &conf.IDMappings 465 specg.ContainerCreateCommand = conf.CreateCommand 466 if len(specg.Rootfs) == 0 { 467 specg.Rootfs = conf.Rootfs 468 } 469 if len(specg.Image) == 0 { 470 specg.Image = conf.RootfsImageID 471 } 472 var named []*specgen.NamedVolume 473 if len(conf.NamedVolumes) != 0 { 474 for _, v := range conf.NamedVolumes { 475 named = append(named, &specgen.NamedVolume{ 476 Name: v.Name, 477 Dest: v.Dest, 478 Options: v.Options, 479 }) 480 } 481 } 482 specg.Volumes = named 483 var image []*specgen.ImageVolume 484 if len(conf.ImageVolumes) != 0 { 485 for _, v := range conf.ImageVolumes { 486 image = append(image, &specgen.ImageVolume{ 487 Source: v.Source, 488 Destination: v.Dest, 489 ReadWrite: v.ReadWrite, 490 }) 491 } 492 } 493 specg.ImageVolumes = image 494 var overlay []*specgen.OverlayVolume 495 if len(conf.OverlayVolumes) != 0 { 496 for _, v := range conf.OverlayVolumes { 497 overlay = append(overlay, &specgen.OverlayVolume{ 498 Source: v.Source, 499 Destination: v.Dest, 500 Options: v.Options, 501 }) 502 } 503 } 504 specg.OverlayVolumes = overlay 505 _, mounts := c.SortUserVolumes(c.Spec()) 506 specg.Mounts = mounts 507 specg.HostDeviceList = conf.DeviceHostSrc 508 specg.Networks = conf.Networks 509 510 mapSecurityConfig(conf, specg) 511 512 if c.IsInfra() { // if we are creating this spec for a pod's infra ctr, map the compatible options 513 spec, err := json.Marshal(specg) 514 if err != nil { 515 return nil, nil, err 516 } 517 infraInherit := &libpod.InfraInherit{} 518 err = json.Unmarshal(spec, infraInherit) 519 return c, infraInherit, err 520 } 521 // else just return the container 522 return c, nil, nil 523 } 524 525 // mapSecurityConfig takes a libpod.ContainerSecurityConfig and converts it to a 
// specgen.ContainerSecurityConfig.
func mapSecurityConfig(c *libpod.ContainerConfig, s *specgen.SpecGenerator) {
	s.Privileged = c.Privileged
	// NOTE: label options are appended to (not replacing) any SELinux options
	// already present on the spec generator.
	s.SelinuxOpts = append(s.SelinuxOpts, c.LabelOpts...)
	s.User = c.User
	s.Groups = c.Groups
	s.HostUsers = c.HostUsers
}