github.com/containers/podman/v4@v4.9.4/pkg/specgen/generate/container.go (about)

     1  //go:build !remote
     2  // +build !remote
     3  
     4  package generate
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"errors"
    10  	"fmt"
    11  	"os"
    12  	"strconv"
    13  	"strings"
    14  	"time"
    15  
    16  	"github.com/containers/common/libimage"
    17  	"github.com/containers/common/pkg/config"
    18  	"github.com/containers/podman/v4/libpod"
    19  	"github.com/containers/podman/v4/libpod/define"
    20  	ann "github.com/containers/podman/v4/pkg/annotations"
    21  	envLib "github.com/containers/podman/v4/pkg/env"
    22  	"github.com/containers/podman/v4/pkg/signal"
    23  	"github.com/containers/podman/v4/pkg/specgen"
    24  	"github.com/openshift/imagebuilder"
    25  	"github.com/sirupsen/logrus"
    26  )
    27  
    28  func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) (*libimage.Image, string, *libimage.ImageData, error) {
    29  	if s.Image == "" || s.Rootfs != "" {
    30  		return nil, "", nil, nil
    31  	}
    32  
    33  	// Image may already have been set in the generator.
    34  	image, resolvedName := s.GetImage()
    35  	if image != nil {
    36  		inspectData, err := image.Inspect(ctx, nil)
    37  		if err != nil {
    38  			return nil, "", nil, err
    39  		}
    40  		return image, resolvedName, inspectData, nil
    41  	}
    42  
    43  	// Need to look up image.
    44  	lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
    45  	image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, lookupOptions)
    46  	if err != nil {
    47  		return nil, "", nil, err
    48  	}
    49  	manifestList, err := image.ToManifestList()
    50  	// only process if manifest list found otherwise expect it to be regular image
    51  	if err == nil {
    52  		image, err = manifestList.LookupInstance(ctx, s.ImageArch, s.ImageOS, s.ImageVariant)
    53  		if err != nil {
    54  			return nil, "", nil, err
    55  		}
    56  	}
    57  	s.SetImage(image, resolvedName)
    58  	inspectData, err := image.Inspect(ctx, nil)
    59  	if err != nil {
    60  		return nil, "", nil, err
    61  	}
    62  	return image, resolvedName, inspectData, err
    63  }
    64  
// CompleteSpec fills any missing parts of the spec generator (e.g. from the
// image). Returns a set of warnings or any fatal error that occurred.
func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) ([]string, error) {
	// Only add image configuration if we have an image
	newImage, _, inspectData, err := getImageFromSpec(ctx, r, s)
	if err != nil {
		return nil, err
	}
	if inspectData != nil {
		if s.HealthConfig == nil {
			// NOTE: the health check is only set for Docker images
			// but inspect will take care of it.
			s.HealthConfig = inspectData.HealthCheck
			if s.HealthConfig != nil {
				// Backfill default timeout/interval when the image's
				// health check leaves them unset (zero).
				if s.HealthConfig.Timeout == 0 {
					hct, err := time.ParseDuration(define.DefaultHealthCheckTimeout)
					if err != nil {
						return nil, err
					}
					s.HealthConfig.Timeout = hct
				}
				if s.HealthConfig.Interval == 0 {
					hct, err := time.ParseDuration(define.DefaultHealthCheckInterval)
					if err != nil {
						return nil, err
					}
					s.HealthConfig.Interval = hct
				}
			}
		}

		// Image stop signal
		if s.StopSignal == nil {
			if inspectData.Config.StopSignal != "" {
				sig, err := signal.ParseSignalNameOrNumber(inspectData.Config.StopSignal)
				if err != nil {
					return nil, err
				}
				s.StopSignal = &sig
			}
		}
	}

	rtc, err := r.GetConfigNoCopy()
	if err != nil {
		return nil, err
	}

	// Get Default Environment from containers.conf
	defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(s.EnvHost, s.HTTPProxy))
	if err != nil {
		return nil, fmt.Errorf("parsing fields in containers.conf: %w", err)
	}
	var envs map[string]string

	// Image Environment defaults
	if inspectData != nil {
		// Image envs from the image if they don't exist
		// already, overriding the default environments
		envs, err = envLib.ParseSlice(inspectData.Config.Env)
		if err != nil {
			return nil, fmt.Errorf("env fields from image failed to parse: %w", err)
		}
		// Precedence (lowest to highest): built-in defaults,
		// containers.conf defaults, image-provided envs.
		defaultEnvs = envLib.Join(envLib.DefaultEnvVariables(), envLib.Join(defaultEnvs, envs))
	}

	// add default terminal to env if tty flag is set
	_, ok := defaultEnvs["TERM"]
	if s.Terminal && !ok {
		defaultEnvs["TERM"] = "xterm"
	}

	// Expand --env-merge expressions against the environment built so far.
	for _, e := range s.EnvMerge {
		processedWord, err := imagebuilder.ProcessWord(e, envLib.Slice(defaultEnvs))
		if err != nil {
			return nil, fmt.Errorf("unable to process variables for --env-merge %s: %w", e, err)
		}

		key, val, found := strings.Cut(processedWord, "=")
		if !found {
			return nil, fmt.Errorf("missing `=` for --env-merge substitution %s", e)
		}

		// the env var passed via --env-merge
		// need not be defined in the image
		// continue with an empty string
		defaultEnvs[key] = val
	}

	// Drop variables explicitly unset by the caller.
	for _, e := range s.UnsetEnv {
		delete(defaultEnvs, e)
	}

	// --unsetenv-all discards everything assembled above.
	if s.UnsetEnvAll {
		defaultEnvs = make(map[string]string)
	}
	// First transform the os env into a map. We need it for the labels later in
	// any case.
	osEnv := envLib.Map(os.Environ())

	// Caller Specified defaults
	if s.EnvHost {
		// --env-host: inherit the entire host environment.
		defaultEnvs = envLib.Join(defaultEnvs, osEnv)
	} else if s.HTTPProxy {
		// --http-proxy: inherit only the proxy-related host variables.
		for _, envSpec := range config.ProxyEnv {
			if v, ok := osEnv[envSpec]; ok {
				defaultEnvs[envSpec] = v
			}
		}
	}

	// Explicit --env values win over everything assembled above.
	s.Env = envLib.Join(defaultEnvs, s.Env)

	// Labels and Annotations
	if newImage != nil {
		labels, err := newImage.Labels(ctx)
		if err != nil {
			return nil, err
		}

		// labels from the image that don't already exist
		if len(labels) > 0 && s.Labels == nil {
			s.Labels = make(map[string]string)
		}
		// User-supplied labels take precedence over image labels.
		for k, v := range labels {
			if _, exists := s.Labels[k]; !exists {
				s.Labels[k] = v
			}
		}

		// Do NOT include image annotations - these can have security
		// implications, we don't want untrusted images setting them.
	}

	// in the event this container is in a pod, and the pod has an infra container
	// we will want to configure it as a type "container" instead defaulting to
	// the behavior of a "sandbox" container
	// In Kata containers:
	// - "sandbox" is the annotation that denotes the container should use its own
	//   VM, which is the default behavior
	// - "container" denotes the container should join the VM of the SandboxID
	//   (the infra container)
	annotations := make(map[string]string)
	if len(s.Pod) > 0 {
		p, err := r.LookupPod(s.Pod)
		if err != nil {
			return nil, err
		}
		sandboxID := p.ID()
		if p.HasInfraContainer() {
			infra, err := p.InfraContainer()
			if err != nil {
				return nil, err
			}
			sandboxID = infra.ID()
		}
		annotations[ann.SandboxID] = sandboxID
		// Check if this is an init-ctr and if so, check if
		// the pod is running.  we do not want to add init-ctrs to
		// a running pod because it creates confusion for us.
		if len(s.InitContainerType) > 0 {
			containerStatuses, err := p.Status()
			if err != nil {
				return nil, err
			}
			// If any one of the containers is running, the pod is considered to be
			// running
			for _, con := range containerStatuses {
				if con == define.ContainerStateRunning {
					return nil, errors.New("cannot add init-ctr to a running pod")
				}
			}
		}
	}

	// Annotations from containers.conf ("key=value" entries; a missing
	// value is stored as the empty string).
	for _, v := range rtc.Containers.Annotations.Get() {
		split := strings.SplitN(v, "=", 2)
		k := split[0]
		v := ""
		if len(split) == 2 {
			v = split[1]
		}
		annotations[k] = v
	}
	// now pass in the values from client
	for k, v := range s.Annotations {
		annotations[k] = v
	}
	s.Annotations = annotations

	// Fall back to the runtime's default seccomp profile path.
	if len(s.SeccompProfilePath) < 1 {
		p, err := libpod.DefaultSeccompPath()
		if err != nil {
			return nil, err
		}
		s.SeccompProfilePath = p
	}

	// User defaults to the image's configured user, if any.
	if len(s.User) == 0 && inspectData != nil {
		s.User = inspectData.Config.User
	}
	// Unless already set via the CLI, check if we need to disable process
	// labels or set the defaults.
	if len(s.SelinuxOpts) == 0 {
		if err := setLabelOpts(s, r, s.PidNS, s.IpcNS); err != nil {
			return nil, err
		}
	}

	if s.CgroupsMode == "" {
		s.CgroupsMode = rtc.Cgroups()
	}

	// If caller did not specify Pids Limits load default
	s.InitResourceLimits(rtc)

	if s.LogConfiguration == nil {
		s.LogConfiguration = &specgen.LogConfig{}
	}
	// set log-driver from common if not already set
	if len(s.LogConfiguration.Driver) < 1 {
		s.LogConfiguration.Driver = rtc.Containers.LogDriver
	}
	// log_tag from containers.conf is not supported by the json-file driver;
	// warn instead of silently dropping it.
	if len(rtc.Containers.LogTag) > 0 {
		if s.LogConfiguration.Driver != define.JSONLogging {
			if s.LogConfiguration.Options == nil {
				s.LogConfiguration.Options = make(map[string]string)
			}

			s.LogConfiguration.Options["tag"] = rtc.Containers.LogTag
		} else {
			logrus.Warnf("log_tag %q is not allowed with %q log_driver", rtc.Containers.LogTag, define.JSONLogging)
		}
	}

	warnings, err := verifyContainerResources(s)
	if err != nil {
		return warnings, err
	}

	// Warn on net=host/container/pod/none and port mappings.
	if (s.NetNS.NSMode == specgen.Host || s.NetNS.NSMode == specgen.FromContainer ||
		s.NetNS.NSMode == specgen.FromPod || s.NetNS.NSMode == specgen.NoNetwork) &&
		len(s.PortMappings) > 0 {
		warnings = append(warnings, "Port mappings have been discarded as one of the Host, Container, Pod, and None network modes are in use")
	}

	return warnings, nil
}
   314  
   315  // ConfigToSpec takes a completed container config and converts it back into a specgenerator for purposes of cloning an existing container
   316  func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID string) (*libpod.Container, *libpod.InfraInherit, error) {
   317  	c, err := rt.LookupContainer(containerID)
   318  	if err != nil {
   319  		return nil, nil, err
   320  	}
   321  	conf := c.ConfigWithNetworks()
   322  	if conf == nil {
   323  		return nil, nil, fmt.Errorf("failed to get config for container %s", c.ID())
   324  	}
   325  
   326  	tmpSystemd := conf.Systemd
   327  	tmpMounts := conf.Mounts
   328  
   329  	conf.Systemd = nil
   330  	conf.Mounts = []string{}
   331  
   332  	if specg == nil {
   333  		specg = &specgen.SpecGenerator{}
   334  	}
   335  
   336  	specg.Pod = conf.Pod
   337  
   338  	matching, err := json.Marshal(conf)
   339  	if err != nil {
   340  		return nil, nil, err
   341  	}
   342  
   343  	err = json.Unmarshal(matching, specg)
   344  	if err != nil {
   345  		return nil, nil, err
   346  	}
   347  
   348  	conf.Systemd = tmpSystemd
   349  	conf.Mounts = tmpMounts
   350  
   351  	if conf.Spec != nil {
   352  		if conf.Spec.Linux != nil && conf.Spec.Linux.Resources != nil {
   353  			if specg.ResourceLimits == nil {
   354  				specg.ResourceLimits = conf.Spec.Linux.Resources
   355  			}
   356  		}
   357  		if conf.Spec.Process != nil && conf.Spec.Process.Env != nil {
   358  			env := make(map[string]string)
   359  			for _, entry := range conf.Spec.Process.Env {
   360  				split := strings.SplitN(entry, "=", 2)
   361  				if len(split) == 2 {
   362  					env[split[0]] = split[1]
   363  				}
   364  			}
   365  			specg.Env = env
   366  		}
   367  	}
   368  
   369  	nameSpaces := []string{"pid", "net", "cgroup", "ipc", "uts", "user"}
   370  	containers := []string{conf.PIDNsCtr, conf.NetNsCtr, conf.CgroupNsCtr, conf.IPCNsCtr, conf.UTSNsCtr, conf.UserNsCtr}
   371  	place := []*specgen.Namespace{&specg.PidNS, &specg.NetNS, &specg.CgroupNS, &specg.IpcNS, &specg.UtsNS, &specg.UserNS}
   372  	for i, ns := range containers {
   373  		if len(ns) > 0 {
   374  			ns := specgen.Namespace{NSMode: specgen.FromContainer, Value: ns}
   375  			place[i] = &ns
   376  		} else {
   377  			switch nameSpaces[i] {
   378  			case "pid":
   379  				specg.PidNS = specgen.Namespace{NSMode: specgen.Default} // default
   380  			case "net":
   381  				switch {
   382  				case conf.NetMode.IsBridge():
   383  					toExpose := make(map[uint16]string, len(conf.ExposedPorts))
   384  					for _, expose := range []map[uint16][]string{conf.ExposedPorts} {
   385  						for port, proto := range expose {
   386  							toExpose[port] = strings.Join(proto, ",")
   387  						}
   388  					}
   389  					specg.Expose = toExpose
   390  					specg.PortMappings = conf.PortMappings
   391  					specg.NetNS = specgen.Namespace{NSMode: specgen.Bridge}
   392  				case conf.NetMode.IsSlirp4netns():
   393  					toExpose := make(map[uint16]string, len(conf.ExposedPorts))
   394  					for _, expose := range []map[uint16][]string{conf.ExposedPorts} {
   395  						for port, proto := range expose {
   396  							toExpose[port] = strings.Join(proto, ",")
   397  						}
   398  					}
   399  					specg.Expose = toExpose
   400  					specg.PortMappings = conf.PortMappings
   401  					netMode := strings.Split(string(conf.NetMode), ":")
   402  					var val string
   403  					if len(netMode) > 1 {
   404  						val = netMode[1]
   405  					}
   406  					specg.NetNS = specgen.Namespace{NSMode: specgen.Slirp, Value: val}
   407  				case conf.NetMode.IsPrivate():
   408  					specg.NetNS = specgen.Namespace{NSMode: specgen.Private}
   409  				case conf.NetMode.IsDefault():
   410  					specg.NetNS = specgen.Namespace{NSMode: specgen.Default}
   411  				case conf.NetMode.IsUserDefined():
   412  					specg.NetNS = specgen.Namespace{NSMode: specgen.Path, Value: strings.Split(string(conf.NetMode), ":")[1]}
   413  				case conf.NetMode.IsContainer():
   414  					specg.NetNS = specgen.Namespace{NSMode: specgen.FromContainer, Value: strings.Split(string(conf.NetMode), ":")[1]}
   415  				case conf.NetMode.IsPod():
   416  					specg.NetNS = specgen.Namespace{NSMode: specgen.FromPod, Value: strings.Split(string(conf.NetMode), ":")[1]}
   417  				}
   418  			case "cgroup":
   419  				specg.CgroupNS = specgen.Namespace{NSMode: specgen.Default} // default
   420  			case "ipc":
   421  				switch conf.ShmDir {
   422  				case "/dev/shm":
   423  					specg.IpcNS = specgen.Namespace{NSMode: specgen.Host}
   424  				case "":
   425  					specg.IpcNS = specgen.Namespace{NSMode: specgen.None}
   426  				default:
   427  					specg.IpcNS = specgen.Namespace{NSMode: specgen.Default} // default
   428  				}
   429  			case "uts":
   430  				specg.UtsNS = specgen.Namespace{NSMode: specgen.Private} // default
   431  			case "user":
   432  				if conf.AddCurrentUserPasswdEntry {
   433  					specg.UserNS = specgen.Namespace{NSMode: specgen.KeepID}
   434  				} else {
   435  					specg.UserNS = specgen.Namespace{NSMode: specgen.Default} // default
   436  				}
   437  			}
   438  		}
   439  	}
   440  
   441  	specg.IDMappings = &conf.IDMappings
   442  	specg.ContainerCreateCommand = conf.CreateCommand
   443  	if len(specg.Rootfs) == 0 {
   444  		specg.Rootfs = conf.Rootfs
   445  	}
   446  	if len(specg.Image) == 0 {
   447  		specg.Image = conf.RootfsImageID
   448  	}
   449  	var named []*specgen.NamedVolume
   450  	if len(conf.NamedVolumes) != 0 {
   451  		for _, v := range conf.NamedVolumes {
   452  			named = append(named, &specgen.NamedVolume{
   453  				Name:    v.Name,
   454  				Dest:    v.Dest,
   455  				Options: v.Options,
   456  			})
   457  		}
   458  	}
   459  	specg.Volumes = named
   460  	var image []*specgen.ImageVolume
   461  	if len(conf.ImageVolumes) != 0 {
   462  		for _, v := range conf.ImageVolumes {
   463  			image = append(image, &specgen.ImageVolume{
   464  				Source:      v.Source,
   465  				Destination: v.Dest,
   466  				ReadWrite:   v.ReadWrite,
   467  			})
   468  		}
   469  	}
   470  	specg.ImageVolumes = image
   471  	var overlay []*specgen.OverlayVolume
   472  	if len(conf.OverlayVolumes) != 0 {
   473  		for _, v := range conf.OverlayVolumes {
   474  			overlay = append(overlay, &specgen.OverlayVolume{
   475  				Source:      v.Source,
   476  				Destination: v.Dest,
   477  				Options:     v.Options,
   478  			})
   479  		}
   480  	}
   481  	specg.OverlayVolumes = overlay
   482  	_, mounts := c.SortUserVolumes(c.ConfigNoCopy().Spec)
   483  	specg.Mounts = mounts
   484  	specg.HostDeviceList = conf.DeviceHostSrc
   485  	specg.Networks = conf.Networks
   486  	specg.ShmSize = &conf.ShmSize
   487  	specg.ShmSizeSystemd = &conf.ShmSizeSystemd
   488  
   489  	mapSecurityConfig(conf, specg)
   490  
   491  	if c.IsInfra() { // if we are creating this spec for a pod's infra ctr, map the compatible options
   492  		spec, err := json.Marshal(specg)
   493  		if err != nil {
   494  			return nil, nil, err
   495  		}
   496  		infraInherit := &libpod.InfraInherit{}
   497  		err = json.Unmarshal(spec, infraInherit)
   498  		return c, infraInherit, err
   499  	}
   500  	// else just return the container
   501  	return c, nil, nil
   502  }
   503  
   504  // mapSecurityConfig takes a libpod.ContainerSecurityConfig and converts it to a specgen.ContinerSecurityConfig
   505  func mapSecurityConfig(c *libpod.ContainerConfig, s *specgen.SpecGenerator) {
   506  	s.Privileged = c.Privileged
   507  	s.SelinuxOpts = append(s.SelinuxOpts, c.LabelOpts...)
   508  	s.User = c.User
   509  	s.Groups = c.Groups
   510  	s.HostUsers = c.HostUsers
   511  }
   512  
// CheckName looks for existing containers/pods with the same name, and
// modifies the given string until a new name is found.
// kind selects what is probed for collisions: true checks containers,
// false checks pods.
func CheckName(rt *libpod.Runtime, n string, kind bool) string {
	switch {
	case strings.Contains(n, "-clone"):
		// ind points just past the first "-clone"; whatever follows is
		// expected to be a numeric clone counter (possibly empty).
		ind := strings.Index(n, "-clone") + 6
		num, err := strconv.Atoi(n[ind:])
		if num == 0 && err != nil { // clone1 is hard to get with this logic, just check for it here.
			// No numeric suffix: probe "<name>1" directly.
			if kind {
				_, err = rt.LookupContainer(n + "1")
			} else {
				_, err = rt.LookupPod(n + "1")
			}

			// A failed lookup means "<name>1" is free; take it.
			if err != nil {
				n += "1"
				break
			}
			// NOTE(review): when "<name>1" already exists we fall through
			// with the original, untruncated n — confirm that is intended.
		} else {
			// Strip the numeric suffix; a fresh one is appended below.
			n = n[0:ind]
		}
		// Count upwards from the parsed suffix until a lookup fails,
		// i.e. until a free name is found.
		err = nil
		count := num
		for err == nil {
			count++
			tempN := n + strconv.Itoa(count)
			if kind {
				_, err = rt.LookupContainer(tempN)
			} else {
				_, err = rt.LookupPod(tempN)
			}
		}
		n += strconv.Itoa(count)
	default:
		// First clone of this name: just tag it.
		n += "-clone"
	}
	return n
}