github.com/hanks177/podman/v4@v4.1.3-0.20220613032544-16d90015bc83/libpod/container.go

     1  package libpod
     2  
     3  import (
     4  	"bytes"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"net"
     8  	"os"
     9  	"strings"
    10  	"time"
    11  
    12  	types040 "github.com/containernetworking/cni/pkg/types/040"
    13  	"github.com/containers/common/libnetwork/cni"
    14  	"github.com/containers/common/libnetwork/types"
    15  	"github.com/containers/common/pkg/config"
    16  	"github.com/containers/common/pkg/secrets"
    17  	"github.com/containers/image/v5/manifest"
    18  	"github.com/containers/storage"
    19  	"github.com/hanks177/podman/v4/libpod/define"
    20  	"github.com/hanks177/podman/v4/libpod/lock"
    21  	spec "github.com/opencontainers/runtime-spec/specs-go"
    22  	"github.com/pkg/errors"
    23  	"github.com/sirupsen/logrus"
    24  )
    25  
    26  // CgroupfsDefaultCgroupParent is the cgroup parent for CgroupFS in libpod
    27  const CgroupfsDefaultCgroupParent = "/libpod_parent"
    28  
    29  // SystemdDefaultCgroupParent is the cgroup parent for the systemd cgroup
    30  // manager in libpod
    31  const SystemdDefaultCgroupParent = "machine.slice"
    32  
    33  // SystemdDefaultRootlessCgroupParent is the cgroup parent for the systemd cgroup
    34  // manager in libpod when running as rootless
    35  const SystemdDefaultRootlessCgroupParent = "user.slice"
    36  
    37  // DefaultWaitInterval is the default interval between container status checks
    38  // while waiting.
    39  const DefaultWaitInterval = 250 * time.Millisecond
    40  
    41  // LinuxNS represents a Linux namespace
    42  type LinuxNS int
    43  
    44  const (
    45  	// InvalidNS is an invalid namespace
    46  	InvalidNS LinuxNS = iota
    47  	// IPCNS is the IPC namespace
    48  	IPCNS LinuxNS = iota
    49  	// MountNS is the mount namespace
    50  	MountNS LinuxNS = iota
    51  	// NetNS is the network namespace
    52  	NetNS LinuxNS = iota
    53  	// PIDNS is the PID namespace
    54  	PIDNS LinuxNS = iota
    55  	// UserNS is the user namespace
    56  	UserNS LinuxNS = iota
    57  	// UTSNS is the UTS namespace
    58  	UTSNS LinuxNS = iota
    59  	// CgroupNS is the Cgroup namespace
    60  	CgroupNS LinuxNS = iota
    61  )
    62  
    63  // String returns a string representation of a Linux namespace
    64  // It is guaranteed to be the name of the namespace in /proc for valid ns types
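        // For example, NetNS.String() returns "net", which namespacePath() below
        // combines into a path such as "/proc/<pid>/ns/net".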
    65  func (ns LinuxNS) String() string {
    66  	switch ns {
    67  	case InvalidNS:
    68  		return "invalid"
    69  	case IPCNS:
    70  		return "ipc"
    71  	case MountNS:
    72  		return "mnt"
    73  	case NetNS:
    74  		return "net"
    75  	case PIDNS:
    76  		return "pid"
    77  	case UserNS:
    78  		return "user"
    79  	case UTSNS:
    80  		return "uts"
    81  	case CgroupNS:
    82  		return "cgroup"
    83  	default:
    84  		return "unknown"
    85  	}
    86  }
    87  
    88  // Container is a single OCI container.
    89  // All operations on a Container that access state must begin with a call to
    90  // syncContainer().
    91  // There is no guarantee that the container's state is readable before
    92  // syncContainer() is run, and even if it is, its contents will be out of date
    93  // and must be refreshed from the database.
    94  // Generally, this requirement applies only to top-level functions; helpers can
    95  // assume that their callers handled this requirement. Generally speaking, if a
    96  // function takes the container lock and accesses any part of state, it should
    97  // syncContainer() immediately after locking.
    98  type Container struct {
    99  	config *ContainerConfig
   100  
   101  	state *ContainerState
   102  
   103  	// Batched indicates that a container has been locked as part of a
   104  	// Batch() operation
   105  	// Functions called on a batched container will not lock or sync
   106  	batched bool
   107  
   108  	valid      bool
   109  	lock       lock.Locker
   110  	runtime    *Runtime
   111  	ociRuntime OCIRuntime
   112  
   113  	rootlessSlirpSyncR *os.File
   114  	rootlessSlirpSyncW *os.File
   115  
   116  	rootlessPortSyncR *os.File
   117  	rootlessPortSyncW *os.File
   118  
   119  	// perNetworkOpts should be set when you want to use special network
   120  	// options when calling network setup/teardown. This should be used for
   121  	// container restore or network reload for example. Leave this nil if
   122  	// the settings from the container config should be used.
   123  	perNetworkOpts map[string]types.PerNetworkOptions
   124  
   125  	// This is true if a container is restored from a checkpoint.
   126  	restoreFromCheckpoint bool
   127  
   128  	// Used to query the NOTIFY_SOCKET once along with setting up
   129  	// mounts etc.
   130  	notifySocket string
   131  
   132  	slirp4netnsSubnet *net.IPNet
   133  }
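        // Illustrative sketch (not part of the original file): the batched field above
        // is set by Batch(), defined elsewhere in libpod, which runs several accessors
        // under a single lock and state sync. Assuming Batch has the signature
        // Batch(func(*Container) error) error, a caller might use it roughly like:
        //
        //	if err := ctr.Batch(func(c *Container) error {
        //		// within the batch, accessors neither re-lock nor re-sync
        //		if _, err := c.State(); err != nil {
        //			return err
        //		}
        //		_, err := c.PID()
        //		return err
        //	}); err != nil {
        //		logrus.Error(err)
        //	}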
   134  
   135  // ContainerState contains the current state of the container
   136  // It is stored on disk in a tmpfs and recreated on reboot
   137  type ContainerState struct {
   138  	// The current state of the running container
   139  	State define.ContainerStatus `json:"state"`
   140  	// The path to the JSON OCI runtime spec for this container
   141  	ConfigPath string `json:"configPath,omitempty"`
   142  	// RunDir is a per-boot directory for container content
   143  	RunDir string `json:"runDir,omitempty"`
   144  	// Mounted indicates whether the container's storage has been mounted
   145  	// for use
   146  	Mounted bool `json:"mounted,omitempty"`
   147  	// Mountpoint contains the path to the container's mounted storage as given
   148  	// by containers/storage.
   149  	Mountpoint string `json:"mountPoint,omitempty"`
   150  	// StartedTime is the time the container was started
   151  	StartedTime time.Time `json:"startedTime,omitempty"`
   152  	// FinishedTime is the time the container finished executing
   153  	FinishedTime time.Time `json:"finishedTime,omitempty"`
   154  	// ExitCode is the exit code returned when the container stopped
   155  	ExitCode int32 `json:"exitCode,omitempty"`
   156  	// Exited is whether the container has exited
   157  	Exited bool `json:"exited,omitempty"`
   158  	// OOMKilled indicates that the container was killed as it ran out of
   159  	// memory
   160  	OOMKilled bool `json:"oomKilled,omitempty"`
   161  	// Checkpointed indicates that the container was stopped by a checkpoint
   162  	// operation.
   163  	Checkpointed bool `json:"checkpointed,omitempty"`
   164  	// PID is the PID of a running container
   165  	PID int `json:"pid,omitempty"`
   166  	// ConmonPID is the PID of the container's conmon
   167  	ConmonPID int `json:"conmonPid,omitempty"`
   168  	// ExecSessions contains all exec sessions that are associated with this
   169  	// container.
   170  	ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"`
   171  	// LegacyExecSessions are legacy exec sessions from older versions of
   172  	// Podman.
   173  	// These are DEPRECATED and will be removed in a future release.
   174  	LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"`
   175  	// NetworkStatusOld contains the configuration results for all networks
   176  	// the container is attached to. Only populated if we created a network
   177  	// namespace for the container, and the network namespace is currently
   178  	// active.
   179  	// These are DEPRECATED and will be removed in a future release.
   180  	// This field is only used for backwards compatibility.
   181  	NetworkStatusOld []*types040.Result `json:"networkResults,omitempty"`
   182  	// NetworkStatus contains the network Status for all networks
   183  	// the container is attached to. Only populated if we created a network
   184  	// namespace for the container, and the network namespace is currently
   185  	// active.
   186  	// To read this field, use container.getNetworkStatus() instead; it will
   187  	// take care of migrating the old DEPRECATED network status to the new format.
   188  	NetworkStatus map[string]types.StatusBlock `json:"networkStatus,omitempty"`
   189  	// BindMounts contains files that will be bind-mounted into the
   190  	// container when it is mounted.
   191  	// These include /etc/hosts and /etc/resolv.conf
   192  	// This maps the path the file will be mounted to in the container to
   193  	// the path of the file on disk outside the container
   194  	BindMounts map[string]string `json:"bindMounts,omitempty"`
   195  	// StoppedByUser indicates whether the container was stopped by an
   196  	// explicit call to the Stop() API.
   197  	StoppedByUser bool `json:"stoppedByUser,omitempty"`
   198  	// RestartPolicyMatch indicates whether the conditions for restart
   199  	// policy have been met.
   200  	RestartPolicyMatch bool `json:"restartPolicyMatch,omitempty"`
   201  	// RestartCount is how many times the container was restarted by its
   202  	// restart policy. This is NOT incremented by normal container restarts
   203  	// (only by restart policy).
   204  	RestartCount uint `json:"restartCount,omitempty"`
   205  
   206  	// ExtensionStageHooks holds hooks which will be executed by libpod
   207  	// and not delegated to the OCI runtime.
   208  	ExtensionStageHooks map[string][]spec.Hook `json:"extensionStageHooks,omitempty"`
   209  
   210  	// NetInterfaceDescriptions describes the relationship between a CNI
   211  	// network and its interface names
   212  	NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
   213  
   214  	// Service indicates that the container is the service container of a
   215  	// service. A service consists of one or more pods.  The service
   216  	// container is started before all pods and is stopped when the last
   217  	// pod stops. The service container allows for tracking and managing
   218  	// the entire life cycle of a service which may be started via
   219  	// `podman-play-kube`.
   220  	Service Service
   221  
   222  	// containerPlatformState holds platform-specific container state.
   223  	containerPlatformState
   224  
   225  	// Following checkpoint/restore related information is displayed
   226  	// if the container has been checkpointed or restored.
   227  	CheckpointedTime time.Time `json:"checkpointedTime,omitempty"`
   228  	RestoredTime     time.Time `json:"restoredTime,omitempty"`
   229  	CheckpointLog    string    `json:"checkpointLog,omitempty"`
   230  	CheckpointPath   string    `json:"checkpointPath,omitempty"`
   231  	RestoreLog       string    `json:"restoreLog,omitempty"`
   232  	Restored         bool      `json:"restored,omitempty"`
   233  }
   234  
   235  // ContainerNamedVolume is a named volume that will be mounted into the
   236  // container. Each named volume is a libpod Volume present in the state.
   237  type ContainerNamedVolume struct {
   238  	// Name is the name of the volume to mount in.
   239  	// Must resolve to a valid volume present in this Podman.
   240  	Name string `json:"volumeName"`
   241  	// Dest is the mount's destination
   242  	Dest string `json:"dest"`
   243  	// Options are fstab style mount options
   244  	Options []string `json:"options,omitempty"`
   245  }
   246  
   247  // ContainerOverlayVolume is an overlay volume that will be mounted into the
   248  // container. Each volume is a libpod Volume present in the state.
   249  type ContainerOverlayVolume struct {
   250  	// Destination is the absolute path where the mount will be placed in the container.
   251  	Dest string `json:"dest"`
   252  	// Source specifies the source path of the mount.
   253  	Source string `json:"source,omitempty"`
   254  	// Options holds overlay volume options.
   255  	Options []string `json:"options,omitempty"`
   256  }
   257  
   258  // ContainerImageVolume is a volume based on a container image.  The container
   259  // image is first mounted on the host and is then bind-mounted into the
   260  // container.
   261  type ContainerImageVolume struct {
   262  	// Source is the source of the image volume.  The image can be referred
   263  	// to by name and by ID.
   264  	Source string `json:"source"`
   265  	// Dest is the absolute path of the mount in the container.
   266  	Dest string `json:"dest"`
   267  	// ReadWrite sets the volume writable.
   268  	ReadWrite bool `json:"rw"`
   269  }
   270  
   271  // ContainerSecret is a secret that is mounted in a container
   272  type ContainerSecret struct {
   273  	// Secret is the secret
   274  	*secrets.Secret
   275  	// UID is the UID of the secret file
   276  	UID uint32
   277  	// GID is the GID of the secret file
   278  	GID uint32
   279  	// Mode is the mode of the secret file
   280  	Mode uint32
   281  	// Secret target inside container
   282  	Target string
   283  }
   284  
   285  // ContainerNetworkDescriptions describes the relationship between the CNI
   286  // network and its ethN interface name, where N is an integer
   287  type ContainerNetworkDescriptions map[string]int
   288  
   289  // Config accessors
   290  // Unlocked
   291  
   292  // Config returns the configuration used to create the container.
   293  // Note that the returned config does not include the actual networks.
   294  // Use ConfigWithNetworks() if you need them.
   295  func (c *Container) Config() *ContainerConfig {
   296  	returnConfig := new(ContainerConfig)
   297  	if err := JSONDeepCopy(c.config, returnConfig); err != nil {
   298  		return nil
   299  	}
   300  	return returnConfig
   301  }
   302  
   303  // ConfigWithNetworks returns the container's configuration, including its networks.
   304  func (c *Container) ConfigWithNetworks() *ContainerConfig {
   305  	returnConfig := c.Config()
   306  	if returnConfig == nil {
   307  		return nil
   308  	}
   309  
   310  	networks, err := c.networks()
   311  	if err != nil {
   312  		return nil
   313  	}
   314  	returnConfig.Networks = networks
   315  
   316  	return returnConfig
   317  }
   318  
   319  // ConfigNoCopy returns the configuration used by the container.
   320  // Note that the returned value is not a copy and must hence
   321  // only be used in a reading fashion.
   322  func (c *Container) ConfigNoCopy() *ContainerConfig {
   323  	return c.config
   324  }
   325  
   326  // DeviceHostSrc returns the user supplied device to be passed down in the pod
   327  func (c *Container) DeviceHostSrc() []spec.LinuxDevice {
   328  	return c.config.DeviceHostSrc
   329  }
   330  
   331  // Runtime returns the container's Runtime.
   332  func (c *Container) Runtime() *Runtime {
   333  	return c.runtime
   334  }
   335  
   336  // Spec returns the container's OCI runtime spec
   337  // The spec returned is the one used to create the container. The running
   338  // spec may differ slightly as mounts are added based on the image
   339  func (c *Container) Spec() *spec.Spec {
   340  	returnSpec := new(spec.Spec)
   341  	if err := JSONDeepCopy(c.config.Spec, returnSpec); err != nil {
   342  		return nil
   343  	}
   344  
   345  	return returnSpec
   346  }
   347  
   348  // specFromState returns the unmarshalled json config of the container.  If the
   349  // config does not exist (e.g., because the container was never started) return
   350  // the spec from the config.
   351  func (c *Container) specFromState() (*spec.Spec, error) {
   352  	returnSpec := c.config.Spec
   353  
   354  	if f, err := os.Open(c.state.ConfigPath); err == nil {
   355  		returnSpec = new(spec.Spec)
   356  		content, err := ioutil.ReadAll(f)
   357  		if err != nil {
   358  			return nil, errors.Wrapf(err, "error reading container config")
   359  		}
   360  		if err := json.Unmarshal(content, &returnSpec); err != nil {
   361  			return nil, errors.Wrapf(err, "error unmarshalling container config")
   362  		}
   363  	} else if !os.IsNotExist(err) {
   364  		// a missing config file is ignored; any other error is reported
   365  		return nil, errors.Wrapf(err, "error opening container config")
   366  	}
   367  
   368  	return returnSpec, nil
   369  }
   370  
   371  // ID returns the container's ID
   372  func (c *Container) ID() string {
   373  	return c.config.ID
   374  }
   375  
   376  // Name returns the container's name
   377  func (c *Container) Name() string {
   378  	return c.config.Name
   379  }
   380  
   381  // PodID returns the full ID of the pod the container belongs to, or "" if it
   382  // does not belong to a pod
   383  func (c *Container) PodID() string {
   384  	return c.config.Pod
   385  }
   386  
   387  // Namespace returns the libpod namespace the container is in.
   388  // Namespaces are used to logically separate containers and pods in the state.
   389  func (c *Container) Namespace() string {
   390  	return c.config.Namespace
   391  }
   392  
   393  // Image returns the ID and name of the image used as the container's rootfs.
   394  func (c *Container) Image() (string, string) {
   395  	return c.config.RootfsImageID, c.config.RootfsImageName
   396  }
   397  
   398  // RawImageName returns the unprocessed, non-normalized, user-specified image
   399  // name.
   400  func (c *Container) RawImageName() string {
   401  	return c.config.RawImageName
   402  }
   403  
   404  // ShmDir returns the source path to be mounted on /dev/shm in the container
   405  func (c *Container) ShmDir() string {
   406  	return c.config.ShmDir
   407  }
   408  
   409  // ShmSize returns the size of SHM device to be mounted into the container
   410  func (c *Container) ShmSize() int64 {
   411  	return c.config.ShmSize
   412  }
   413  
   414  // StaticDir returns the directory used to store persistent container files
   415  func (c *Container) StaticDir() string {
   416  	return c.config.StaticDir
   417  }
   418  
   419  // NamedVolumes returns the container's named volumes.
   420  // The name of each is guaranteed to point to a valid libpod Volume present in
   421  // the state.
   422  func (c *Container) NamedVolumes() []*ContainerNamedVolume {
   423  	volumes := []*ContainerNamedVolume{}
   424  	for _, vol := range c.config.NamedVolumes {
   425  		newVol := new(ContainerNamedVolume)
   426  		newVol.Name = vol.Name
   427  		newVol.Dest = vol.Dest
   428  		newVol.Options = vol.Options
   429  		volumes = append(volumes, newVol)
   430  	}
   431  
   432  	return volumes
   433  }
   434  
   435  // Privileged returns whether the container is privileged
   436  func (c *Container) Privileged() bool {
   437  	return c.config.Privileged
   438  }
   439  
   440  // ProcessLabel returns the selinux ProcessLabel of the container
   441  func (c *Container) ProcessLabel() string {
   442  	return c.config.ProcessLabel
   443  }
   444  
   445  // MountLabel returns the SELinux mount label of the container
   446  func (c *Container) MountLabel() string {
   447  	return c.config.MountLabel
   448  }
   449  
   450  // Systemd returns whether the container will be running in systemd mode
   451  func (c *Container) Systemd() bool {
   452  	if c.config.Systemd != nil {
   453  		return *c.config.Systemd
   454  	}
   455  	return false
   456  }
   457  
   458  // User returns the user the container runs as
   459  func (c *Container) User() string {
   460  	return c.config.User
   461  }
   462  
   463  // Dependencies gets the containers this container depends upon
   464  func (c *Container) Dependencies() []string {
   465  	// Collect in a map first to remove dupes
   466  	dependsCtrs := map[string]bool{}
   467  
   468  	// First add all namespace containers
   469  	if c.config.IPCNsCtr != "" {
   470  		dependsCtrs[c.config.IPCNsCtr] = true
   471  	}
   472  	if c.config.MountNsCtr != "" {
   473  		dependsCtrs[c.config.MountNsCtr] = true
   474  	}
   475  	if c.config.NetNsCtr != "" {
   476  		dependsCtrs[c.config.NetNsCtr] = true
   477  	}
   478  	if c.config.PIDNsCtr != "" {
   479  		dependsCtrs[c.config.PIDNsCtr] = true
   480  	}
   481  	if c.config.UserNsCtr != "" {
   482  		dependsCtrs[c.config.UserNsCtr] = true
   483  	}
   484  	if c.config.UTSNsCtr != "" {
   485  		dependsCtrs[c.config.UTSNsCtr] = true
   486  	}
   487  	if c.config.CgroupNsCtr != "" {
   488  		dependsCtrs[c.config.CgroupNsCtr] = true
   489  	}
   490  
   491  	// Add all generic dependencies
   492  	for _, id := range c.config.Dependencies {
   493  		dependsCtrs[id] = true
   494  	}
   495  
   496  	if len(dependsCtrs) == 0 {
   497  		return []string{}
   498  	}
   499  
   500  	depends := make([]string, 0, len(dependsCtrs))
   501  	for ctr := range dependsCtrs {
   502  		depends = append(depends, ctr)
   503  	}
   504  
   505  	return depends
   506  }
   507  
   508  // NewNetNS returns whether the container will create a new network namespace
   509  func (c *Container) NewNetNS() bool {
   510  	return c.config.CreateNetNS
   511  }
   512  
   513  // PortMappings returns the ports that will be mapped into a container if
   514  // a new network namespace is created
   515  // If NewNetNS() is false, this value is unused
   516  func (c *Container) PortMappings() ([]types.PortMapping, error) {
   517  	// First check if the container belongs to a network namespace (like a pod)
   518  	if len(c.config.NetNsCtr) > 0 {
   519  		netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
   520  		if err != nil {
   521  			return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID())
   522  		}
   523  		return netNsCtr.PortMappings()
   524  	}
   525  	return c.config.PortMappings, nil
   526  }
   527  
   528  // DNSServers returns DNS servers that will be used in the container's
   529  // resolv.conf
   530  // If empty, DNS servers from the host's resolv.conf will be used instead
   531  func (c *Container) DNSServers() []net.IP {
   532  	return c.config.DNSServer
   533  }
   534  
   535  // DNSSearch returns the DNS search domains that will be used in the container's
   536  // resolv.conf
   537  // If empty, DNS Search domains from the host's resolv.conf will be used instead
   538  func (c *Container) DNSSearch() []string {
   539  	return c.config.DNSSearch
   540  }
   541  
   542  // DNSOption returns the DNS options that will be used in the container's
   543  // resolv.conf
   544  // If empty, options from the host's resolv.conf will be used instead
   545  func (c *Container) DNSOption() []string {
   546  	return c.config.DNSOption
   547  }
   548  
   549  // HostsAdd returns hosts that will be added to the container's hosts file
   550  // The host system's hosts file is used as a base, and these are appended to it
   551  func (c *Container) HostsAdd() []string {
   552  	return c.config.HostAdd
   553  }
   554  
   555  // UserVolumes returns user-added volume mounts in the container.
   556  // These are not added to the spec, but are used during image commit and to
   557  // trigger some OCI hooks.
   558  func (c *Container) UserVolumes() []string {
   559  	volumes := make([]string, 0, len(c.config.UserVolumes))
   560  	volumes = append(volumes, c.config.UserVolumes...)
   561  	return volumes
   562  }
   563  
   564  // Entrypoint is the container's entrypoint.
   565  // This is not added to the spec, but is instead used during image commit.
   566  func (c *Container) Entrypoint() []string {
   567  	entrypoint := make([]string, 0, len(c.config.Entrypoint))
   568  	entrypoint = append(entrypoint, c.config.Entrypoint...)
   569  	return entrypoint
   570  }
   571  
   572  // Command is the container's command
   573  // This is not added to the spec, but is instead used during image commit
   574  func (c *Container) Command() []string {
   575  	command := make([]string, 0, len(c.config.Command))
   576  	command = append(command, c.config.Command...)
   577  	return command
   578  }
   579  
   580  // Stdin returns whether STDIN on the container will be kept open
   581  func (c *Container) Stdin() bool {
   582  	return c.config.Stdin
   583  }
   584  
   585  // Labels returns the container's labels
   586  func (c *Container) Labels() map[string]string {
   587  	labels := make(map[string]string)
   588  	for key, value := range c.config.Labels {
   589  		labels[key] = value
   590  	}
   591  	return labels
   592  }
   593  
   594  // StopSignal is the signal that will be used to stop the container
   595  // If it fails to stop the container, SIGKILL will be used after a timeout
   596  // If StopSignal is 0, the default signal of SIGTERM will be used
   597  func (c *Container) StopSignal() uint {
   598  	return c.config.StopSignal
   599  }
   600  
   601  // StopTimeout returns the container's stop timeout
   602  // If the container's default stop signal fails to kill the container, SIGKILL
   603  // will be used after this timeout
   604  func (c *Container) StopTimeout() uint {
   605  	return c.config.StopTimeout
   606  }
   607  
   608  // CreatedTime gets the time when the container was created
   609  func (c *Container) CreatedTime() time.Time {
   610  	return c.config.CreatedTime
   611  }
   612  
   613  // CgroupParent gets the container's Cgroup parent
   614  func (c *Container) CgroupParent() string {
   615  	return c.config.CgroupParent
   616  }
   617  
   618  // LogPath returns the path to the container's log file
   619  // This file will only be present after Init() is called to create the container
   620  // in the runtime
   621  func (c *Container) LogPath() string {
   622  	return c.config.LogPath
   623  }
   624  
   625  // LogTag returns the tag for the container's log file
   626  func (c *Container) LogTag() string {
   627  	return c.config.LogTag
   628  }
   629  
   630  // RestartPolicy returns the container's restart policy.
   631  func (c *Container) RestartPolicy() string {
   632  	return c.config.RestartPolicy
   633  }
   634  
   635  // RestartRetries returns the number of retries that will be attempted when
   636  // using the "on-failure" restart policy
   637  func (c *Container) RestartRetries() uint {
   638  	return c.config.RestartRetries
   639  }
   640  
   641  // LogDriver returns the log driver for this container
   642  func (c *Container) LogDriver() string {
   643  	return c.config.LogDriver
   644  }
   645  
   646  // RuntimeName returns the name of the runtime
   647  func (c *Container) RuntimeName() string {
   648  	return c.config.OCIRuntime
   649  }
   650  
   651  // Runtime spec accessors
   652  // Unlocked
   653  
   654  // Hostname gets the container's hostname
   655  func (c *Container) Hostname() string {
   656  	if c.config.UTSNsCtr != "" {
   657  		utsNsCtr, err := c.runtime.GetContainer(c.config.UTSNsCtr)
   658  		if err != nil {
   659  			// should we return an error here?
   660  			logrus.Errorf("unable to lookup uts namespace for container %s: %v", c.ID(), err)
   661  			return ""
   662  		}
   663  		return utsNsCtr.Hostname()
   664  	}
   665  	if c.config.Spec.Hostname != "" {
   666  		return c.config.Spec.Hostname
   667  	}
   668  
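        	// Fall back to the container ID, truncated to the usual 12-character
        	// short-ID form (for example "01f5c8a2b4d3").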
   669  	if len(c.ID()) < 12 {
   670  		return c.ID()
   671  	}
   672  	return c.ID()[:12]
   673  }
   674  
   675  // WorkingDir returns the container's working directory
   676  func (c *Container) WorkingDir() string {
   677  	if c.config.Spec.Process != nil {
   678  		return c.config.Spec.Process.Cwd
   679  	}
   680  	return "/"
   681  }
   682  
   683  // State Accessors
   684  // Require locking
   685  
   686  // State returns the current state of the container
   687  func (c *Container) State() (define.ContainerStatus, error) {
   688  	if !c.batched {
   689  		c.lock.Lock()
   690  		defer c.lock.Unlock()
   691  
   692  		if err := c.syncContainer(); err != nil {
   693  			return define.ContainerStateUnknown, err
   694  		}
   695  	}
   696  	return c.state.State, nil
   697  }
   698  
   699  // Mounted returns whether the container is mounted and the path it is mounted
   700  // at (if it is mounted).
   701  // If the container is not mounted, no error is returned, and the mountpoint
   702  // will be set to "".
   703  func (c *Container) Mounted() (bool, string, error) {
   704  	if !c.batched {
   705  		c.lock.Lock()
   706  		defer c.lock.Unlock()
   707  		if err := c.syncContainer(); err != nil {
   708  			return false, "", errors.Wrapf(err, "error updating container %s state", c.ID())
   709  		}
   710  	}
   711  	// We cannot directly return c.state.Mountpoint as it is not guaranteed
   712  	// to be set if the container is mounted, only if the container has been
   713  	// prepared with c.prepare().
   714  	// Instead, let's call into c/storage
   715  	mountedTimes, err := c.runtime.storageService.MountedContainerImage(c.ID())
   716  	if err != nil {
   717  		return false, "", err
   718  	}
   719  
   720  	if mountedTimes > 0 {
   721  		mountPoint, err := c.runtime.storageService.GetMountpoint(c.ID())
   722  		if err != nil {
   723  			return false, "", err
   724  		}
   725  
   726  		return true, mountPoint, nil
   727  	}
   728  
   729  	return false, "", nil
   730  }
   731  
   732  // StartedTime is the time the container was started
   733  func (c *Container) StartedTime() (time.Time, error) {
   734  	if !c.batched {
   735  		c.lock.Lock()
   736  		defer c.lock.Unlock()
   737  		if err := c.syncContainer(); err != nil {
   738  			return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
   739  		}
   740  	}
   741  	return c.state.StartedTime, nil
   742  }
   743  
   744  // FinishedTime is the time the container was stopped
   745  func (c *Container) FinishedTime() (time.Time, error) {
   746  	if !c.batched {
   747  		c.lock.Lock()
   748  		defer c.lock.Unlock()
   749  		if err := c.syncContainer(); err != nil {
   750  			return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
   751  		}
   752  	}
   753  	return c.state.FinishedTime, nil
   754  }
   755  
   756  // ExitCode returns the exit code of the container as
   757  // an int32, and whether the container has exited.
   758  // If the container has not exited, exit code will always be 0.
   759  // If the container restarts, the exit code is reset to 0.
   760  func (c *Container) ExitCode() (int32, bool, error) {
   761  	if !c.batched {
   762  		c.lock.Lock()
   763  		defer c.lock.Unlock()
   764  		if err := c.syncContainer(); err != nil {
   765  			return 0, false, errors.Wrapf(err, "error updating container %s state", c.ID())
   766  		}
   767  	}
   768  	return c.state.ExitCode, c.state.Exited, nil
   769  }
   770  
   771  // OOMKilled returns whether the container was killed by an OOM condition
   772  func (c *Container) OOMKilled() (bool, error) {
   773  	if !c.batched {
   774  		c.lock.Lock()
   775  		defer c.lock.Unlock()
   776  		if err := c.syncContainer(); err != nil {
   777  			return false, errors.Wrapf(err, "error updating container %s state", c.ID())
   778  		}
   779  	}
   780  	return c.state.OOMKilled, nil
   781  }
   782  
   783  // PID returns the PID of the container.
   784  // If the container is not running, a pid of 0 will be returned. No error will
   785  // occur.
   786  func (c *Container) PID() (int, error) {
   787  	if !c.batched {
   788  		c.lock.Lock()
   789  		defer c.lock.Unlock()
   790  
   791  		if err := c.syncContainer(); err != nil {
   792  			return -1, err
   793  		}
   794  	}
   795  
   796  	return c.state.PID, nil
   797  }
   798  
   799  // ConmonPID returns the PID of the container's conmon process.
   800  // If the container is not running, a PID of 0 will be returned. No error will
   801  // occur.
   802  func (c *Container) ConmonPID() (int, error) {
   803  	if !c.batched {
   804  		c.lock.Lock()
   805  		defer c.lock.Unlock()
   806  
   807  		if err := c.syncContainer(); err != nil {
   808  			return -1, err
   809  		}
   810  	}
   811  
   812  	return c.state.ConmonPID, nil
   813  }
   814  
   815  // ExecSessions retrieves active exec sessions running in the container
   816  func (c *Container) ExecSessions() ([]string, error) {
   817  	if !c.batched {
   818  		c.lock.Lock()
   819  		defer c.lock.Unlock()
   820  
   821  		if err := c.syncContainer(); err != nil {
   822  			return nil, err
   823  		}
   824  	}
   825  
   826  	ids := make([]string, 0, len(c.state.ExecSessions))
   827  	for id := range c.state.ExecSessions {
   828  		ids = append(ids, id)
   829  	}
   830  
   831  	return ids, nil
   832  }
   833  
   834  // execSessionNoCopy returns the exec session associated with the given id.
   835  // Note that the session is not a deep copy.
   836  func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
   837  	if !c.batched {
   838  		c.lock.Lock()
   839  		defer c.lock.Unlock()
   840  
   841  		if err := c.syncContainer(); err != nil {
   842  			return nil, err
   843  		}
   844  	}
   845  
   846  	session, ok := c.state.ExecSessions[id]
   847  	if !ok {
   848  		return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
   849  	}
   850  
   851  	return session, nil
   852  }
   853  
   854  // ExecSession retrieves detailed information on a single active exec session in
   855  // a container
   856  func (c *Container) ExecSession(id string) (*ExecSession, error) {
   857  	session, err := c.execSessionNoCopy(id)
   858  	if err != nil {
   859  		return nil, err
   860  	}
   861  
   862  	returnSession := new(ExecSession)
   863  	if err := JSONDeepCopy(session, returnSession); err != nil {
   864  		return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
   865  	}
   866  
   867  	return returnSession, nil
   868  }
   869  
   870  // BindMounts retrieves bind mounts that were created by libpod and will be
   871  // added to the container
   872  // All these mounts except /dev/shm are ignored if a mount in the given spec has
   873  // the same destination
   874  // These mounts include /etc/resolv.conf, /etc/hosts, and /etc/hostname
   875  // The return is formatted as a map from destination (mountpoint in the
   876  // container) to source (path of the file that will be mounted into the
   877  // container)
   878  // If the container has not been started yet, an empty map will be returned, as
   879  // the files in question are only created when the container is started.
   880  func (c *Container) BindMounts() (map[string]string, error) {
   881  	if !c.batched {
   882  		c.lock.Lock()
   883  		defer c.lock.Unlock()
   884  
   885  		if err := c.syncContainer(); err != nil {
   886  			return nil, err
   887  		}
   888  	}
   889  
   890  	newMap := make(map[string]string, len(c.state.BindMounts))
   891  
   892  	for key, val := range c.state.BindMounts {
   893  		newMap[key] = val
   894  	}
   895  
   896  	return newMap, nil
   897  }
   898  
   899  // StoppedByUser returns whether the container was last stopped by an explicit
   900  // call to the Stop() API, or whether it exited naturally.
   901  func (c *Container) StoppedByUser() (bool, error) {
   902  	if !c.batched {
   903  		c.lock.Lock()
   904  		defer c.lock.Unlock()
   905  
   906  		if err := c.syncContainer(); err != nil {
   907  			return false, err
   908  		}
   909  	}
   910  
   911  	return c.state.StoppedByUser, nil
   912  }
   913  
   914  // Misc Accessors
   915  // Most will require locking
   916  
   917  // NamespacePath returns the path of one of the container's namespaces
   918  // If the container is not running, an error will be returned
   919  func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
   920  	if !c.batched {
   921  		c.lock.Lock()
   922  		defer c.lock.Unlock()
   923  		if err := c.syncContainer(); err != nil {
   924  			return "", errors.Wrapf(err, "error updating container %s state", c.ID())
   925  		}
   926  	}
   927  
   928  	return c.namespacePath(linuxNS)
   929  }
   930  
   931  // namespacePath returns the path of one of the container's namespaces
   932  // If the container is not running, an error will be returned
   933  func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
   934  	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
   935  		return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
   936  	}
   937  
   938  	if linuxNS == InvalidNS {
   939  		return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
   940  	}
   941  
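        	// For example, the network namespace of a container whose init process
        	// has PID 4321 resolves to "/proc/4321/ns/net".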
   942  	return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
   943  }
   944  
   945  // CgroupManager returns the cgroup manager used by the given container.
   946  func (c *Container) CgroupManager() string {
   947  	cgroupManager := c.config.CgroupManager
   948  	if cgroupManager == "" {
   949  		cgroupManager = c.runtime.config.Engine.CgroupManager
   950  	}
   951  	return cgroupManager
   952  }
   953  
   954  // CgroupPath returns a cgroups "path" for the given container.
   955  // Note that the container must be running.  Otherwise, an error
   956  // is returned.
   957  func (c *Container) CgroupPath() (string, error) {
   958  	if !c.batched {
   959  		c.lock.Lock()
   960  		defer c.lock.Unlock()
   961  		if err := c.syncContainer(); err != nil {
   962  			return "", errors.Wrapf(err, "error updating container %s state", c.ID())
   963  		}
   964  	}
   965  	return c.cGroupPath()
   966  }
   967  
   968  // cGroupPath returns a cgroups "path" for the given container.
   969  // Note that the container must be running.  Otherwise, an error
   970  // is returned.
   971  // NOTE: only call this when owning the container's lock.
   972  func (c *Container) cGroupPath() (string, error) {
   973  	if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
   974  		return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
   975  	}
   976  	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
   977  		return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
   978  	}
   979  
   980  	// Read /proc/{PID}/cgroup and find the *longest* cgroup entry.  That's
   981  	// needed to account for hacks in cgroups v1, where each line in the
   982  	// file could potentially point to a cgroup.  The longest one, however,
   983  	// is the libpod-specific one we're looking for.
   984  	//
   985  	// See #8397 on the need for the longest-path look up.
   986  	//
   987  	// Another workaround is needed for containers running systemd as the payload:
   988  	// such containers move themselves into a child subgroup of
   989  	// the named systemd cgroup hierarchy.  Ignore any named cgroups during
   990  	// the lookup.
   991  	// See #10602 for more details.
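        	// Illustrative examples of the lines parsed below (the paths are made up):
        	// on cgroups v2 a single line such as
        	//	0::/machine.slice/libpod-deadbeef.scope
        	// and on cgroups v1 several lines such as
        	//	12:memory:/machine.slice/libpod-deadbeef.scope
        	//	1:name=systemd:/machine.slice/libpod-deadbeef.scope/init.scope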
   992  	procPath := fmt.Sprintf("/proc/%d/cgroup", c.state.PID)
   993  	lines, err := ioutil.ReadFile(procPath)
   994  	if err != nil {
   995  		// If the file doesn't exist, the container could have been terminated,
   996  		// so report it as stopped.
   997  		if os.IsNotExist(err) {
   998  			return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
   999  		}
  1000  		return "", err
  1001  	}
  1002  
  1003  	var cgroupPath string
  1004  	for _, line := range bytes.Split(lines, []byte("\n")) {
  1005  		// skip last empty line
  1006  		if len(line) == 0 {
  1007  			continue
  1008  		}
  1009  		// cgroups(7) nails it down to three fields with the 3rd
  1010  		// pointing to the cgroup's path which works both on v1 and v2.
  1011  		fields := bytes.Split(line, []byte(":"))
  1012  		if len(fields) != 3 {
  1013  			logrus.Debugf("Error parsing cgroup: expected 3 fields but got %d: %s", len(fields), procPath)
  1014  			continue
  1015  		}
  1016  		// Ignore named cgroups like name=systemd.
  1017  		if bytes.Contains(fields[1], []byte("=")) {
  1018  			continue
  1019  		}
  1020  		path := string(fields[2])
  1021  		if len(path) > len(cgroupPath) {
  1022  			cgroupPath = path
  1023  		}
  1024  	}
  1025  
  1026  	if len(cgroupPath) == 0 {
  1027  		return "", errors.Errorf("could not find any cgroup in %q", procPath)
  1028  	}
  1029  
  1030  	cgroupManager := c.CgroupManager()
  1031  	switch {
  1032  	case c.config.CgroupsMode == cgroupSplit:
  1033  		name := fmt.Sprintf("/libpod-payload-%s/", c.ID())
  1034  		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
  1035  			return cgroupPath[:index+len(name)-1], nil
  1036  		}
  1037  	case cgroupManager == config.CgroupfsCgroupsManager:
  1038  		name := fmt.Sprintf("/libpod-%s/", c.ID())
  1039  		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
  1040  			return cgroupPath[:index+len(name)-1], nil
  1041  		}
  1042  	case cgroupManager == config.SystemdCgroupsManager:
  1043  		// When running under systemd, try to detect the scope that was requested
  1044  		// to be created.  It improves the heuristic since we report the first
  1045  		// cgroup that was created instead of the cgroup where PID 1 might have
  1046  		// moved to.
  1047  		name := fmt.Sprintf("/libpod-%s.scope/", c.ID())
  1048  		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
  1049  			return cgroupPath[:index+len(name)-1], nil
  1050  		}
  1051  	}
  1052  
  1053  	return cgroupPath, nil
  1054  }
  1055  
  1056  // RootFsSize returns the root FS size of the container
  1057  func (c *Container) RootFsSize() (int64, error) {
  1058  	if !c.batched {
  1059  		c.lock.Lock()
  1060  		defer c.lock.Unlock()
  1061  		if err := c.syncContainer(); err != nil {
  1062  			return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
  1063  		}
  1064  	}
  1065  	return c.rootFsSize()
  1066  }
  1067  
  1068  // RWSize returns the rw size of the container
  1069  func (c *Container) RWSize() (int64, error) {
  1070  	if !c.batched {
  1071  		c.lock.Lock()
  1072  		defer c.lock.Unlock()
  1073  		if err := c.syncContainer(); err != nil {
  1074  			return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
  1075  		}
  1076  	}
  1077  	return c.rwSize()
  1078  }
  1079  
  1080  // IDMappings returns the UID/GID mapping used for the container
  1081  func (c *Container) IDMappings() storage.IDMappingOptions {
  1082  	return c.config.IDMappings
  1083  }
  1084  
  1085  // RootUID returns the host UID to which the container's root UID is mapped
  1086  func (c *Container) RootUID() int {
  1087  	if len(c.config.IDMappings.UIDMap) == 1 && c.config.IDMappings.UIDMap[0].Size == 1 {
  1088  		return c.config.IDMappings.UIDMap[0].HostID
  1089  	}
  1090  	for _, uidmap := range c.config.IDMappings.UIDMap {
  1091  		if uidmap.ContainerID == 0 {
  1092  			return uidmap.HostID
  1093  		}
  1094  	}
  1095  	return 0
  1096  }
  1097  
  1098  // RootGID returns the host GID to which the container's root GID is mapped
  1099  func (c *Container) RootGID() int {
  1100  	if len(c.config.IDMappings.GIDMap) == 1 && c.config.IDMappings.GIDMap[0].Size == 1 {
  1101  		return c.config.IDMappings.GIDMap[0].HostID
  1102  	}
  1103  	for _, gidmap := range c.config.IDMappings.GIDMap {
  1104  		if gidmap.ContainerID == 0 {
  1105  			return gidmap.HostID
  1106  		}
  1107  	}
  1108  	return 0
  1109  }
  1110  
  1111  // IsInfra returns whether the container is an infra container
  1112  func (c *Container) IsInfra() bool {
  1113  	return c.config.IsInfra
  1114  }
  1115  
  1116  // IsInitCtr returns whether the container is an init container
  1117  func (c *Container) IsInitCtr() bool {
  1118  	return len(c.config.InitContainerType) > 0
  1119  }
  1120  
  1121  // IsReadOnly returns whether the container is running in read only mode
  1122  func (c *Container) IsReadOnly() bool {
  1123  	return c.config.Spec.Root.Readonly
  1124  }
  1125  
  1126  // NetworkDisabled returns whether the container is running with a disabled network
  1127  func (c *Container) NetworkDisabled() (bool, error) {
  1128  	if c.config.NetNsCtr != "" {
  1129  		container, err := c.runtime.state.Container(c.config.NetNsCtr)
  1130  		if err != nil {
  1131  			return false, err
  1132  		}
  1133  		return container.NetworkDisabled()
  1134  	}
  1135  	return networkDisabled(c)
  1136  }
  1137  
  1138  func networkDisabled(c *Container) (bool, error) {
  1139  	if c.config.CreateNetNS {
  1140  		return false, nil
  1141  	}
  1142  	if !c.config.PostConfigureNetNS {
  1143  		for _, ns := range c.config.Spec.Linux.Namespaces {
  1144  			if ns.Type == spec.NetworkNamespace {
  1145  				return ns.Path == "", nil
  1146  			}
  1147  		}
  1148  	}
  1149  	return false, nil
  1150  }
  1151  
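        // HostNetwork returns whether the container will use the host's network
        // namespace.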
  1152  func (c *Container) HostNetwork() bool {
  1153  	if c.config.CreateNetNS || c.config.NetNsCtr != "" {
  1154  		return false
  1155  	}
  1156  	for _, ns := range c.config.Spec.Linux.Namespaces {
  1157  		if ns.Type == spec.NetworkNamespace {
  1158  			return false
  1159  		}
  1160  	}
  1161  	return true
  1162  }
  1163  
  1164  // ContainerState returns a copy of the container's state struct
  1165  func (c *Container) ContainerState() (*ContainerState, error) {
  1166  	if !c.batched {
  1167  		c.lock.Lock()
  1168  		defer c.lock.Unlock()
  1169  
  1170  		if err := c.syncContainer(); err != nil {
  1171  			return nil, err
  1172  		}
  1173  	}
  1174  	returnConfig := new(ContainerState)
  1175  	if err := JSONDeepCopy(c.state, returnConfig); err != nil {
  1176  		return nil, errors.Wrapf(err, "error copying container %s state", c.ID())
  1177  	}
  1178  	return returnConfig, nil
  1179  }
  1180  
  1181  // HasHealthCheck returns bool as to whether there is a health check
  1182  // defined for the container
  1183  func (c *Container) HasHealthCheck() bool {
  1184  	return c.config.HealthCheckConfig != nil
  1185  }
  1186  
  1187  // HealthCheckConfig returns the command and timing attributes of the health check
  1188  func (c *Container) HealthCheckConfig() *manifest.Schema2HealthConfig {
  1189  	return c.config.HealthCheckConfig
  1190  }
  1191  
  1192  // AutoRemove indicates whether the container will be removed after it is executed
  1193  func (c *Container) AutoRemove() bool {
  1194  	spec := c.config.Spec
  1195  	if spec.Annotations == nil {
  1196  		return false
  1197  	}
  1198  	return spec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue
  1199  }
  1200  
  1201  // Timezone returns the timezone configured inside the container.
  1202  // Local means it has the same timezone as the host machine
  1203  func (c *Container) Timezone() string {
  1204  	return c.config.Timezone
  1205  }
  1206  
  1207  // Umask returns the Umask bits configured inside the container.
  1208  func (c *Container) Umask() string {
  1209  	return c.config.Umask
  1210  }
  1211  
  1212  // Secrets returns the secrets in the container
  1213  func (c *Container) Secrets() []*ContainerSecret {
  1214  	return c.config.Secrets
  1215  }
  1216  
  1217  // Networks gets all the networks this container is connected to.
  1218  // Please do NOT use ctr.config.Networks, as it can diverge from the actual
  1219  // networks at runtime via network connect and disconnect.
  1220  // If the container is configured to use CNI and this function returns an empty
  1221  // array, the container will still be connected to the default network.
  1226  func (c *Container) Networks() ([]string, error) {
  1227  	if !c.batched {
  1228  		c.lock.Lock()
  1229  		defer c.lock.Unlock()
  1230  
  1231  		if err := c.syncContainer(); err != nil {
  1232  			return nil, err
  1233  		}
  1234  	}
  1235  
  1236  	networks, err := c.networks()
  1237  	if err != nil {
  1238  		return nil, err
  1239  	}
  1240  
  1241  	names := make([]string, 0, len(networks))
  1242  
  1243  	for name := range networks {
  1244  		names = append(names, name)
  1245  	}
  1246  
  1247  	return names, nil
  1248  }
  1249  
  1250  // NetworkMode gets the configured network mode for the container.
  1251  // Get actual value from the database
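        // Illustrative return values, derived from the cases below: the stored mode
        // (e.g. "bridge" or "slirp4netns") when a new network namespace is created,
        // "container:<id>" when joining another container's namespace, "ns:<path>"
        // for a pre-existing namespace path, "none" for an empty new namespace, and
        // "host" when no network namespace is configured.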
  1252  func (c *Container) NetworkMode() string {
  1253  	networkMode := ""
  1254  	ctrSpec := c.config.Spec
  1255  
  1256  	switch {
  1257  	case c.config.CreateNetNS:
  1258  		// We actually store the network
  1259  		// mode for Slirp and Bridge, so
  1260  		// we can just use that
  1261  		networkMode = string(c.config.NetMode)
  1262  	case c.config.NetNsCtr != "":
  1263  		networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
  1264  	default:
  1265  		// Find the spec's network namespace.
  1266  		// If there is none, it's host networking.
  1267  		// If there is one and it has a path, it's "ns:".
  1268  		foundNetNS := false
  1269  		for _, ns := range ctrSpec.Linux.Namespaces {
  1270  			if ns.Type == spec.NetworkNamespace {
  1271  				foundNetNS = true
  1272  				if ns.Path != "" {
  1273  					networkMode = fmt.Sprintf("ns:%s", ns.Path)
  1274  				} else {
  1275  					// We're making a network ns,  but not
  1276  					// configuring with Slirp or CNI. That
  1277  					// means it's --net=none
  1278  					networkMode = "none"
  1279  				}
  1280  				break
  1281  			}
  1282  		}
  1283  		if !foundNetNS {
  1284  			networkMode = "host"
  1285  		}
  1286  	}
  1287  	return networkMode
  1288  }
  1289  
  1290  // Unlocked accessor for networks
  1291  func (c *Container) networks() (map[string]types.PerNetworkOptions, error) {
  1292  	return c.runtime.state.GetNetworks(c)
  1293  }
  1294  
  1295  // getInterfaceByName returns a formatted interface name for a given
  1296  // network along with a bool as to whether the network existed
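        // For example, with descriptions {"net1": 0, "net2": 1}, getInterfaceByName("net2")
        // returns ("eth1", true), while an unknown network name returns ("", false).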
  1297  func (d ContainerNetworkDescriptions) getInterfaceByName(networkName string) (string, bool) {
  1298  	val, exists := d[networkName]
  1299  	if !exists {
  1300  		return "", exists
  1301  	}
  1302  	return fmt.Sprintf("eth%d", val), exists
  1303  }
  1304  
  1305  // getNetworkStatus gets the current network status from the state. If the container
  1306  // still uses the old network status it is converted to the new format. This function
  1307  // should be used instead of reading c.state.NetworkStatus directly.
  1308  func (c *Container) getNetworkStatus() map[string]types.StatusBlock {
  1309  	if c.state.NetworkStatus != nil {
  1310  		return c.state.NetworkStatus
  1311  	}
  1312  	if c.state.NetworkStatusOld != nil {
  1313  		networks, err := c.networks()
  1314  		if err != nil {
  1315  			return nil
  1316  		}
  1317  		if len(networks) != len(c.state.NetworkStatusOld) {
  1318  			return nil
  1319  		}
  1320  		result := make(map[string]types.StatusBlock, len(c.state.NetworkStatusOld))
  1321  		i := 0
  1322  		// Note: NetworkStatusOld does not contain the network names, so we look them
  1323  		// up separately. The order is not guaranteed, but after a state refresh it should work.
  1324  		for netName := range networks {
  1325  			status, err := cni.CNIResultToStatus(c.state.NetworkStatusOld[i])
  1326  			if err != nil {
  1327  				return nil
  1328  			}
  1329  			result[netName] = status
  1330  			i++
  1331  		}
  1332  		c.state.NetworkStatus = result
  1333  		_ = c.save()
  1334  
  1335  		return result
  1336  	}
  1337  	return nil
  1338  }