github.com/containers/podman/v2@v2.2.2-0.20210501105131-c1e07d070c4c/pkg/varlinkapi/container.go

     1  package varlinkapi
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"os"
     8  	"path/filepath"
     9  	"regexp"
    10  	"runtime"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"sync"
    15  	"time"
    16  
    17  	"github.com/containers/image/v5/types"
    18  	"github.com/containers/podman/v2/libpod"
    19  	"github.com/containers/podman/v2/libpod/define"
    20  	"github.com/containers/podman/v2/libpod/image"
    21  	"github.com/containers/podman/v2/pkg/timetype"
    22  	"github.com/containers/podman/v2/pkg/util"
    23  	"github.com/cri-o/ocicni/pkg/ocicni"
    24  	"github.com/docker/go-units"
    25  	"github.com/google/shlex"
    26  	"github.com/pkg/errors"
    27  	"github.com/sirupsen/logrus"
    28  	v1 "k8s.io/api/core/v1"
    29  )
    30  
    31  const (
    32  	cidTruncLength = 12
    33  	podTruncLength = 12
    34  	iidTruncLength = 12
    35  	cmdTruncLength = 17
    36  )
    37  
    38  // PsOptions describes the options used to build ps output.
    39  type PsOptions struct {
    40  	All       bool
    41  	Format    string
    42  	Last      int
    43  	Latest    bool
    44  	NoTrunc   bool
    45  	Pod       bool
    46  	Quiet     bool
    47  	Size      bool
    48  	Sort      string
    49  	Namespace bool
    50  	Sync      bool
    51  }
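
        // As an illustrative sketch only (not taken from callers in this file), a
        // set of options corresponding roughly to `podman ps --all --size` could be
        // built as:
        //
        //	opts := PsOptions{
        //		All:  true,
        //		Size: true,
        //	}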
    52  
    53  // BatchContainerStruct is the return object from BatchContainerOp and contains
    54  // container-related information.
    55  type BatchContainerStruct struct {
    56  	ConConfig   *libpod.ContainerConfig
    57  	ConState    define.ContainerStatus
    58  	ExitCode    int32
    59  	Exited      bool
    60  	Pid         int
    61  	StartedTime time.Time
    62  	ExitedTime  time.Time
    63  	Size        *ContainerSize
    64  }
    65  
    66  // PsContainerOutput is the struct being returned from a parallel
    67  // batch operation.
    68  type PsContainerOutput struct {
    69  	ID        string
    70  	Image     string
    71  	ImageID   string
    72  	Command   string
    73  	Created   string
    74  	Ports     string
    75  	Names     string
    76  	IsInfra   bool
    77  	Status    string
    78  	State     define.ContainerStatus
    79  	Pid       int
    80  	Size      *ContainerSize
    81  	Pod       string
    82  	PodName   string
    83  	CreatedAt time.Time
    84  	ExitedAt  time.Time
    85  	StartedAt time.Time
    86  	Labels    map[string]string
    87  	PID       string
    88  	Cgroup    string
    89  	IPC       string
    90  	MNT       string
    91  	NET       string
    92  	PIDNS     string
    93  	User      string
    94  	UTS       string
    95  	Mounts    string
    96  }
    97  
    98  // Namespace describes the namespace output for ps.
    99  type Namespace struct {
   100  	PID    string `json:"pid,omitempty"`
   101  	Cgroup string `json:"cgroup,omitempty"`
   102  	IPC    string `json:"ipc,omitempty"`
   103  	MNT    string `json:"mnt,omitempty"`
   104  	NET    string `json:"net,omitempty"`
   105  	PIDNS  string `json:"pidns,omitempty"`
   106  	User   string `json:"user,omitempty"`
   107  	UTS    string `json:"uts,omitempty"`
   108  }
   109  
   110  // ContainerSize holds the size of the container's root filesystem and top
   111  // read-write layer.
   112  type ContainerSize struct {
   113  	RootFsSize int64 `json:"rootFsSize"`
   114  	RwSize     int64 `json:"rwSize"`
   115  }
   116  
   117  // NewBatchContainer runs a batch process under one lock to get container information; it
   118  // should only be called from PBatch.
   119  func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
   120  	var (
   121  		conState  define.ContainerStatus
   122  		command   string
   123  		created   string
   124  		status    string
   125  		exitedAt  time.Time
   126  		startedAt time.Time
   127  		exitCode  int32
   128  		err       error
   129  		pid       int
   130  		size      *ContainerSize
   131  		ns        *Namespace
   132  		pso       PsContainerOutput
   133  	)
   134  	batchErr := ctr.Batch(func(c *libpod.Container) error {
   135  		if opts.Sync {
   136  			if err := c.Sync(); err != nil {
   137  				return err
   138  			}
   139  		}
   140  
   141  		conState, err = c.State()
   142  		if err != nil {
   143  			return errors.Wrapf(err, "unable to obtain container state")
   144  		}
   145  		command = strings.Join(c.Command(), " ")
   146  		created = units.HumanDuration(time.Since(c.CreatedTime())) + " ago"
   147  
   148  		exitCode, _, err = c.ExitCode()
   149  		if err != nil {
   150  			return errors.Wrapf(err, "unable to obtain container exit code")
   151  		}
   152  		startedAt, err = c.StartedTime()
   153  		if err != nil {
   154  			logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
   155  		}
   156  		exitedAt, err = c.FinishedTime()
   157  		if err != nil {
   158  			logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
   159  		}
   160  		if opts.Namespace {
   161  			pid, err = c.PID()
   162  			if err != nil {
   163  				return errors.Wrapf(err, "unable to obtain container pid")
   164  			}
   165  			ns = GetNamespaces(pid)
   166  		}
   167  		if opts.Size {
   168  			size = new(ContainerSize)
   169  
   170  			rootFsSize, err := c.RootFsSize()
   171  			if err != nil {
   172  				logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
   173  			}
   174  
   175  			rwSize, err := c.RWSize()
   176  			if err != nil {
   177  				logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
   178  			}
   179  
   180  			size.RootFsSize = rootFsSize
   181  			size.RwSize = rwSize
   182  		}
   183  
   184  		return nil
   185  	})
   186  
   187  	if batchErr != nil {
   188  		return pso, batchErr
   189  	}
   190  
   191  	switch conState.String() {
   192  	case define.ContainerStateExited.String():
   193  		fallthrough
   194  	case define.ContainerStateStopped.String():
   195  		exitedSince := units.HumanDuration(time.Since(exitedAt))
   196  		status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince)
   197  	case define.ContainerStateRunning.String():
   198  		status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago"
   199  	case define.ContainerStatePaused.String():
   200  		status = "Paused"
   201  	case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String():
   202  		status = "Created"
   203  	case define.ContainerStateRemoving.String():
   204  		status = "Removing"
   205  	default:
   206  		status = "Error"
   207  	}
   208  
   209  	imageID, imageName := ctr.Image()
   210  	cid := ctr.ID()
   211  	podID := ctr.PodID()
   212  	if !opts.NoTrunc {
   213  		cid = cid[0:cidTruncLength]
   214  		if len(podID) > podTruncLength {
   215  			podID = podID[0:podTruncLength]
   216  		}
   217  		if len(command) > cmdTruncLength {
   218  			command = command[0:cmdTruncLength] + "..."
   219  		}
   220  		if len(imageID) > iidTruncLength {
   221  			imageID = imageID[0:iidTruncLength]
   222  		}
   223  	}
   224  
   225  	ports, err := ctr.PortMappings()
   226  	if err != nil {
   227  		logrus.Errorf("unable to lookup port mappings for container %s: %v", ctr.ID(), err)
   228  	}
   229  
   230  	pso.ID = cid
   231  	pso.Image = imageName
   232  	pso.ImageID = imageID
   233  	pso.Command = command
   234  	pso.Created = created
   235  	pso.Ports = portsToString(ports)
   236  	pso.Names = ctr.Name()
   237  	pso.IsInfra = ctr.IsInfra()
   238  	pso.Status = status
   239  	pso.State = conState
   240  	pso.Pid = pid
   241  	pso.Size = size
   242  	pso.ExitedAt = exitedAt
   243  	pso.CreatedAt = ctr.CreatedTime()
   244  	pso.StartedAt = startedAt
   245  	pso.Labels = ctr.Labels()
   246  	pso.Mounts = strings.Join(ctr.UserVolumes(), " ")
   247  
   248  	// Add pod name and pod ID if requested by user.
   249  	// No need to look up the pod if its ID is empty.
   250  	if opts.Pod && len(podID) > 0 {
   251  		// The pod name is not in the container definition
   252  		// so we need to retrieve it using the pod ID.
   253  		var podName string
   254  		pod, err := r.LookupPod(podID)
   255  		if err != nil {
   256  			logrus.Errorf("unable to lookup pod for container %s", ctr.ID())
   257  		} else {
   258  			podName = pod.Name()
   259  		}
   260  
   261  		pso.Pod = podID
   262  		pso.PodName = podName
   263  	}
   264  
   265  	if opts.Namespace {
   266  		pso.Cgroup = ns.Cgroup
   267  		pso.IPC = ns.IPC
   268  		pso.MNT = ns.MNT
   269  		pso.NET = ns.NET
   270  		pso.User = ns.User
   271  		pso.UTS = ns.UTS
   272  		pso.PIDNS = ns.PIDNS
   273  	}
   274  
   275  	return pso, nil
   276  }
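
        // Design note: everything read inside ctr.Batch above happens while the
        // container lock is held once, which is the point of the "batch" helpers in
        // this file: ps needs many small reads per container, and taking the lock a
        // single time per container keeps that inexpensive.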
   277  
   278  type batchFunc func() (PsContainerOutput, error)
   279  
   280  type workerInput struct {
   281  	parallelFunc batchFunc
   282  	opts         PsOptions
   283  	cid          string
   284  	job          int
   285  }
   286  
   287  // worker consumes jobs from the jobs channel and sends each result (or error) on the corresponding channel.
   288  func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) {
   289  	for j := range jobs {
   290  		r, err := j.parallelFunc()
   291  		// If we hit an error, send only the error on the errors channel.
   292  		if err != nil {
   293  			errors <- err
   294  		} else {
   295  			// Return the result.
   296  			results <- r
   297  		}
   298  		wg.Done()
   299  	}
   300  }
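
        // Note that worker never calls wg.Add itself; the caller (PBatch below) is
        // expected to call wg.Add(1) once per job before sending it on the jobs
        // channel, so the wg.Done above balances it. Errors are reported on their own
        // channel instead of aborting the remaining jobs.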
   301  
   302  // GenerateContainerFilterFuncs returns a ContainerFilter function based on the given filter.
   303  func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
   304  	switch filter {
   305  	case "id":
   306  		return func(c *libpod.Container) bool {
   307  			return strings.Contains(c.ID(), filterValue)
   308  		}, nil
   309  	case "label":
   310  		var filterArray = strings.SplitN(filterValue, "=", 2)
   311  		var filterKey = filterArray[0]
   312  		if len(filterArray) > 1 {
   313  			filterValue = filterArray[1]
   314  		} else {
   315  			filterValue = ""
   316  		}
   317  		return func(c *libpod.Container) bool {
   318  			for labelKey, labelValue := range c.Labels() {
   319  				if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
   320  					return true
   321  				}
   322  			}
   323  			return false
   324  		}, nil
   325  	case "name":
   326  		return func(c *libpod.Container) bool {
   327  			match, err := regexp.MatchString(filterValue, c.Name())
   328  			if err != nil {
   329  				return false
   330  			}
   331  			return match
   332  		}, nil
   333  	case "exited":
   334  		exitCode, err := strconv.ParseInt(filterValue, 10, 32)
   335  		if err != nil {
   336  			return nil, errors.Wrapf(err, "exited code out of range %q", filterValue)
   337  		}
   338  		return func(c *libpod.Container) bool {
   339  			ec, exited, err := c.ExitCode()
   340  			if ec == int32(exitCode) && err == nil && exited {
   341  				return true
   342  			}
   343  			return false
   344  		}, nil
   345  	case "status":
   346  		if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
   347  			return nil, errors.Errorf("%s is not a valid status", filterValue)
   348  		}
   349  		return func(c *libpod.Container) bool {
   350  			status, err := c.State()
   351  			if err != nil {
   352  				return false
   353  			}
   354  			if filterValue == "stopped" {
   355  				filterValue = "exited"
   356  			}
   357  			state := status.String()
   358  			if status == define.ContainerStateConfigured {
   359  				state = "created"
   360  			} else if status == define.ContainerStateStopped {
   361  				state = "exited"
   362  			}
   363  			return state == filterValue
   364  		}, nil
   365  	case "ancestor":
   366  		// This needs to be refined to match Docker's behavior:
   367  		// - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>) - containers created from an image or a descendant.
   368  		return func(c *libpod.Container) bool {
   369  			containerConfig := c.Config()
   370  			if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
   371  				return true
   372  			}
   373  			return false
   374  		}, nil
   375  	case "before":
   376  		ctr, err := r.LookupContainer(filterValue)
   377  		if err != nil {
   378  			return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
   379  		}
   380  		containerConfig := ctr.Config()
   381  		createTime := containerConfig.CreatedTime
   382  		return func(c *libpod.Container) bool {
   383  			cc := c.Config()
   384  			return createTime.After(cc.CreatedTime)
   385  		}, nil
   386  	case "since":
   387  		ctr, err := r.LookupContainer(filterValue)
   388  		if err != nil {
   389  			return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
   390  		}
   391  		containerConfig := ctr.Config()
   392  		createTime := containerConfig.CreatedTime
   393  		return func(c *libpod.Container) bool {
   394  			cc := c.Config()
   395  			return createTime.Before(cc.CreatedTime)
   396  		}, nil
   397  	case "volume":
   398  		// - volume=(<volume-name>|<mount-point-destination>)
   399  		return func(c *libpod.Container) bool {
   400  			containerConfig := c.Config()
   401  			var dest string
   402  			arr := strings.Split(filterValue, ":")
   403  			source := arr[0]
   404  			if len(arr) == 2 {
   405  				dest = arr[1]
   406  			}
   407  			for _, mount := range containerConfig.Spec.Mounts {
   408  				if dest != "" && (mount.Source == source && mount.Destination == dest) {
   409  					return true
   410  				}
   411  				if dest == "" && mount.Source == source {
   412  					return true
   413  				}
   414  			}
   415  			return false
   416  		}, nil
   417  	case "health":
   418  		return func(c *libpod.Container) bool {
   419  			hcStatus, err := c.HealthCheckStatus()
   420  			if err != nil {
   421  				return false
   422  			}
   423  			return hcStatus == filterValue
   424  		}, nil
   425  	case "until":
   426  		ts, err := timetype.GetTimestamp(filterValue, time.Now())
   427  		if err != nil {
   428  			return nil, err
   429  		}
   430  		seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
   431  		if err != nil {
   432  			return nil, err
   433  		}
   434  		until := time.Unix(seconds, nanoseconds)
   435  		return func(c *libpod.Container) bool {
   436  			if !until.IsZero() && c.CreatedTime().After(until) {
   437  				return true
   438  			}
   439  			return false
   440  		}, nil
   441  	}
   442  	return nil, errors.Errorf("%s is an invalid filter", filter)
   443  }
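
        // A minimal usage sketch (assuming a valid *libpod.Runtime named runtime; not
        // an excerpt from a real caller):
        //
        //	f, err := GenerateContainerFilterFuncs("label", "app=web", runtime)
        //	if err != nil {
        //		return err
        //	}
        //	ctrs, err := runtime.GetContainers(f)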
   444  
   445  // GetPsContainerOutput returns a slice of containers specifically for ps output.
   446  func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) {
   447  	var (
   448  		filterFuncs      []libpod.ContainerFilter
   449  		outputContainers []*libpod.Container
   450  	)
   451  
   452  	if len(filters) > 0 {
   453  		for _, f := range filters {
   454  			filterSplit := strings.SplitN(f, "=", 2)
   455  			if len(filterSplit) < 2 {
   456  				return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
   457  			}
   458  			generatedFunc, err := GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r)
   459  			if err != nil {
   460  				return nil, errors.Wrapf(err, "invalid filter")
   461  			}
   462  			filterFuncs = append(filterFuncs, generatedFunc)
   463  		}
   464  	}
   465  	if !opts.Latest {
   466  		// Get all containers.
   467  		containers, err := r.GetContainers(filterFuncs...)
   468  		if err != nil {
   469  			return nil, err
   470  		}
   471  
   472  		// The --last flag is not yet supported; error out if it was requested.
   473  		if opts.Last > 0 && opts.Last <= len(containers) {
   474  			return nil, errors.Errorf("--last not yet supported")
   475  		} else {
   476  			outputContainers = containers
   477  		}
   478  	} else {
   479  		// Get just the latest container.
   480  		// Ignore filters.
   481  		latestCtr, err := r.GetLatestContainer()
   482  		if err != nil {
   483  			return nil, err
   484  		}
   485  
   486  		outputContainers = []*libpod.Container{latestCtr}
   487  	}
   488  
   489  	pss := PBatch(r, outputContainers, maxWorkers, opts)
   490  	return pss, nil
   491  }
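
        // Filters are passed as "name=value" strings mirroring the CLI syntax; a
        // hypothetical call might look like:
        //
        //	out, err := GetPsContainerOutput(runtime, opts, []string{"status=running", "label=app=web"}, Parallelize("ps"))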
   492  
   493  // PBatch performs batch operations on containers in parallel. It spawns a
   494  // number of workers proportional to the desired degree of parallelism.
   495  func PBatch(r *libpod.Runtime, containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
   496  	var wg sync.WaitGroup
   497  	psResults := []PsContainerOutput{}
   498  
   499  	// If the number of containers in question is less than the number of
   500  	// proposed parallel operations, we shouldn't spawn so many workers.
   501  	if workers > len(containers) {
   502  		workers = len(containers)
   503  	}
   504  
   505  	jobs := make(chan workerInput, len(containers))
   506  	results := make(chan PsContainerOutput, len(containers))
   507  	batchErrors := make(chan error, len(containers))
   508  
   509  	// Create the workers.
   510  	for w := 1; w <= workers; w++ {
   511  		go worker(&wg, jobs, results, batchErrors)
   512  	}
   513  
   514  	// Add jobs to the workers.
   515  	for i, j := range containers {
   516  		j := j
   517  		wg.Add(1)
   518  		f := func() (PsContainerOutput, error) {
   519  			return NewBatchContainer(r, j, opts)
   520  		}
   521  		jobs <- workerInput{
   522  			parallelFunc: f,
   523  			opts:         opts,
   524  			cid:          j.ID(),
   525  			job:          i,
   526  		}
   527  	}
   528  	close(jobs)
   529  	wg.Wait()
   530  	close(results)
   531  	close(batchErrors)
   532  	for err := range batchErrors {
   533  		logrus.Errorf("unable to get container info: %q", err)
   534  	}
   535  	for res := range results {
   536  		// We sort out running vs non-running here to save lots of copying
   537  		// later.
   538  		if !opts.All && !opts.Latest && opts.Last < 1 {
   539  			if !res.IsInfra && res.State == define.ContainerStateRunning {
   540  				psResults = append(psResults, res)
   541  			}
   542  		} else {
   543  			psResults = append(psResults, res)
   544  		}
   545  	}
   546  	return psResults
   547  }
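
        // The jobs, results, and batchErrors channels above are all buffered to
        // len(containers), so neither the job producer nor the workers can block on a
        // full channel; closing results and batchErrors only after wg.Wait() is what
        // lets the two range loops drain everything and terminate.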
   548  
   549  // BatchContainerOp is used in ps to reduce performance hits by "batching"
   550  // locks.
   551  func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
   552  	var (
   553  		conConfig   *libpod.ContainerConfig
   554  		conState    define.ContainerStatus
   555  		err         error
   556  		exitCode    int32
   557  		exited      bool
   558  		pid         int
   559  		size        *ContainerSize
   560  		startedTime time.Time
   561  		exitedTime  time.Time
   562  	)
   563  
   564  	batchErr := ctr.Batch(func(c *libpod.Container) error {
   565  		conConfig = c.Config()
   566  		conState, err = c.State()
   567  		if err != nil {
   568  			return errors.Wrapf(err, "unable to obtain container state")
   569  		}
   570  
   571  		exitCode, exited, err = c.ExitCode()
   572  		if err != nil {
   573  			return errors.Wrapf(err, "unable to obtain container exit code")
   574  		}
   575  		startedTime, err = c.StartedTime()
   576  		if err != nil {
   577  			logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
   578  		}
   579  		exitedTime, err = c.FinishedTime()
   580  		if err != nil {
   581  			logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
   582  		}
   583  
   584  		if !opts.Size && !opts.Namespace {
   585  			return nil
   586  		}
   587  
   588  		if opts.Namespace {
   589  			pid, err = c.PID()
   590  			if err != nil {
   591  				return errors.Wrapf(err, "unable to obtain container pid")
   592  			}
   593  		}
   594  		if opts.Size {
   595  			size = new(ContainerSize)
   596  
   597  			rootFsSize, err := c.RootFsSize()
   598  			if err != nil {
   599  				logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
   600  			}
   601  
   602  			rwSize, err := c.RWSize()
   603  			if err != nil {
   604  				logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
   605  			}
   606  
   607  			size.RootFsSize = rootFsSize
   608  			size.RwSize = rwSize
   609  		}
   610  		return nil
   611  	})
   612  	if batchErr != nil {
   613  		return BatchContainerStruct{}, batchErr
   614  	}
   615  	return BatchContainerStruct{
   616  		ConConfig:   conConfig,
   617  		ConState:    conState,
   618  		ExitCode:    exitCode,
   619  		Exited:      exited,
   620  		Pid:         pid,
   621  		StartedTime: startedTime,
   622  		ExitedTime:  exitedTime,
   623  		Size:        size,
   624  	}, nil
   625  }
   626  
   627  // GetNamespaces returns a populated namespace struct.
   628  func GetNamespaces(pid int) *Namespace {
   629  	ctrPID := strconv.Itoa(pid)
   630  	cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup"))
   631  	ipc, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "ipc"))
   632  	mnt, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "mnt"))
   633  	net, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "net"))
   634  	pidns, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "pid"))
   635  	user, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "user"))
   636  	uts, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "uts"))
   637  
   638  	return &Namespace{
   639  		PID:    ctrPID,
   640  		Cgroup: cgroup,
   641  		IPC:    ipc,
   642  		MNT:    mnt,
   643  		NET:    net,
   644  		PIDNS:  pidns,
   645  		User:   user,
   646  		UTS:    uts,
   647  	}
   648  }
   649  
   650  // GetNamespaceInfo is an exported wrapper for getNamespaceInfo
   651  func GetNamespaceInfo(path string) (string, error) {
   652  	return getNamespaceInfo(path)
   653  }
   654  
   655  func getNamespaceInfo(path string) (string, error) {
   656  	val, err := os.Readlink(path)
   657  	if err != nil {
   658  		return "", errors.Wrapf(err, "error getting info from %q", path)
   659  	}
   660  	return getStrFromSquareBrackets(val), nil
   661  }
   662  
   663  // getStrFromSquareBrackets extracts the text between square brackets in cmd.
   664  func getStrFromSquareBrackets(cmd string) string {
   665  	reg := regexp.MustCompile(`.*\[|\].*`)
   666  	arr := strings.Split(reg.ReplaceAllLiteralString(cmd, ""), ",")
   667  	return strings.Join(arr, ",")
   668  }
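
        // For example, reading a namespace link under /proc typically yields a value
        // like "net:[4026531992]", which getStrFromSquareBrackets reduces to
        // "4026531992" (the inode number is illustrative, not captured from a real
        // system):
        //
        //	id, _ := GetNamespaceInfo("/proc/1/ns/net") // e.g. "4026531992"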
   669  
   670  func comparePorts(i, j ocicni.PortMapping) bool {
   671  	if i.ContainerPort != j.ContainerPort {
   672  		return i.ContainerPort < j.ContainerPort
   673  	}
   674  
   675  	if i.HostIP != j.HostIP {
   676  		return i.HostIP < j.HostIP
   677  	}
   678  
   679  	if i.HostPort != j.HostPort {
   680  		return i.HostPort < j.HostPort
   681  	}
   682  
   683  	return i.Protocol < j.Protocol
   684  }
   685  
   686  // formatGroup returns the group as <IP:startPort-lastPort->startPort-lastPort/Proto>,
   687  // e.g. 0.0.0.0:1000-1006->1000-1006/tcp.
   688  func formatGroup(key string, start, last int32) string {
   689  	parts := strings.Split(key, "/")
   690  	groupType := parts[0]
   691  	var ip string
   692  	if len(parts) > 1 {
   693  		ip = parts[0]
   694  		groupType = parts[1]
   695  	}
   696  	group := strconv.Itoa(int(start))
   697  	if start != last {
   698  		group = fmt.Sprintf("%s-%d", group, last)
   699  	}
   700  	if ip != "" {
   701  		group = fmt.Sprintf("%s:%s->%s", ip, group, group)
   702  	}
   703  	return fmt.Sprintf("%s/%s", group, groupType)
   704  }
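
        // For example (values traced through the logic above, not recorded test
        // cases):
        //
        //	formatGroup("0.0.0.0/tcp", 1000, 1006) // "0.0.0.0:1000-1006->1000-1006/tcp"
        //	formatGroup("tcp", 80, 80)             // "80/tcp"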
   705  
   706  // portsToString converts the ports used to a string of the form "port1, port2"
   707  // and also groups a continuous list of ports into a readable format.
   708  func portsToString(ports []ocicni.PortMapping) string {
   709  	type portGroup struct {
   710  		first int32
   711  		last  int32
   712  	}
   713  	var portDisplay []string
   714  	if len(ports) == 0 {
   715  		return ""
   716  	}
   717  	// Sort the ports so that grouping continuous ports becomes easy.
   718  	sort.Slice(ports, func(i, j int) bool {
   719  		return comparePorts(ports[i], ports[j])
   720  	})
   721  
   722  	// portGroupMap is used for grouping continuous ports.
   723  	portGroupMap := make(map[string]*portGroup)
   724  	var groupKeyList []string
   725  
   726  	for _, v := range ports {
   727  
   728  		hostIP := v.HostIP
   729  		if hostIP == "" {
   730  			hostIP = "0.0.0.0"
   731  		}
   732  		// If hostPort and containerPort are not the same, list the port individually.
   733  		if v.ContainerPort != v.HostPort {
   734  			portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol))
   735  			continue
   736  		}
   737  
   738  		portMapKey := fmt.Sprintf("%s/%s", hostIP, v.Protocol)
   739  
   740  		portgroup, ok := portGroupMap[portMapKey]
   741  		if !ok {
   742  			portGroupMap[portMapKey] = &portGroup{first: v.ContainerPort, last: v.ContainerPort}
   743  			// This list preserves insertion order so portGroupMap can be traversed deterministically.
   744  			groupKeyList = append(groupKeyList, portMapKey)
   745  			continue
   746  		}
   747  
   748  		if portgroup.last == (v.ContainerPort - 1) {
   749  			portgroup.last = v.ContainerPort
   750  			continue
   751  		}
   752  	}
   753  	// For each portMapKey, format group list and append to output string.
   754  	for _, portKey := range groupKeyList {
   755  		group := portGroupMap[portKey]
   756  		portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last))
   757  	}
   758  	return strings.Join(portDisplay, ", ")
   759  }
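
        // For example, three contiguous published ports whose host and container
        // ports match collapse into one group, while an asymmetric mapping is listed
        // on its own (values traced through the code above, not captured output):
        //
        //	80->80, 81->81, 82->82 (tcp)  => "0.0.0.0:80-82->80-82/tcp"
        //	8080->80 (tcp)                => "0.0.0.0:8080->80/tcp"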
   760  
   761  // GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the
   762  // construction of the runlabel output and environment variables.
   763  func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) {
   764  	var (
   765  		newImage  *image.Image
   766  		err       error
   767  		imageName string
   768  	)
   769  	if pull {
   770  		var registryCreds *types.DockerAuthConfig
   771  		if inputCreds != "" {
   772  			creds, err := util.ParseRegistryCreds(inputCreds)
   773  			if err != nil {
   774  				return "", "", err
   775  			}
   776  			registryCreds = creds
   777  		}
   778  		dockerRegistryOptions.DockerRegistryCreds = registryCreds
   779  		newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, signaturePolicyPath, authfile, output, &dockerRegistryOptions, image.SigningOptions{}, &label, util.PullImageMissing)
   780  	} else {
   781  		newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage)
   782  	}
   783  	if err != nil {
   784  		return "", "", errors.Wrapf(err, "unable to find image")
   785  	}
   786  
   787  	if len(newImage.Names()) < 1 {
   788  		imageName = newImage.ID()
   789  	} else {
   790  		imageName = newImage.Names()[0]
   791  	}
   792  
   793  	runLabel, err := newImage.GetLabel(ctx, label)
   794  	return runLabel, imageName, err
   795  }
   796  
   797  // GenerateRunlabelCommand generates the command that will eventually be executed by Podman.
   798  func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
   799  	// If no name is provided, we use the image's basename instead.
   800  	if name == "" {
   801  		baseName, err := image.GetImageBaseName(imageName)
   802  		if err != nil {
   803  			return nil, nil, err
   804  		}
   805  		name = baseName
   806  	}
   807  	// If the user provided extra arguments, tack them onto the label's command.
   808  	if len(extraArgs) > 0 {
   809  		runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " "))
   810  	}
   811  	cmd, err := GenerateCommand(runLabel, imageName, name, globalOpts)
   812  	if err != nil {
   813  		return nil, nil, errors.Wrapf(err, "unable to generate command")
   814  	}
   815  	env := GenerateRunEnvironment(name, imageName, opts)
   816  	env = append(env, "PODMAN_RUNLABEL_NESTED=1")
   817  
   818  	envmap := envSliceToMap(env)
   819  
   820  	envmapper := func(k string) string {
   821  		switch k {
   822  		case "OPT1":
   823  			return envmap["OPT1"]
   824  		case "OPT2":
   825  			return envmap["OPT2"]
   826  		case "OPT3":
   827  			return envmap["OPT3"]
   828  		case "PWD":
   829  			// I would prefer to use os.Getenv, but PWD does not appear in the process environment here.
   830  			d, err := os.Getwd()
   831  			if err != nil {
   832  				logrus.Error("unable to determine current working directory")
   833  				return ""
   834  			}
   835  			return d
   836  		}
   837  		return ""
   838  	}
   839  	newS := os.Expand(strings.Join(cmd, " "), envmapper)
   840  	cmd, err = shlex.Split(newS)
   841  	if err != nil {
   842  		return nil, nil, err
   843  	}
   844  	return cmd, env, nil
   845  }
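
        // The os.Expand call above substitutes only $OPT1, $OPT2, $OPT3 and $PWD; any
        // variable the mapper does not recognize expands to the empty string at this
        // stage. A rough sketch of the effect (hypothetical label and values):
        //
        //	runLabel: "echo building in $PWD with $OPT1"
        //	becomes:  "echo building in /home/user/project with --verbose"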
   846  
   847  func envSliceToMap(env []string) map[string]string {
   848  	m := make(map[string]string)
   849  	for _, i := range env {
   850  		split := strings.SplitN(i, "=", 2)
   851  		m[split[0]] = strings.Join(split[1:], "=")
   852  	}
   853  	return m
   854  }
   855  
   856  // GenerateKube generates Kubernetes YAML for a pod or container.
   857  func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Service, error) {
   858  	var (
   859  		pod          *libpod.Pod
   860  		podYAML      *v1.Pod
   861  		err          error
   862  		container    *libpod.Container
   863  		servicePorts []v1.ServicePort
   864  		serviceYAML  v1.Service
   865  	)
   866  	// Get the container in question.
   867  	container, err = r.LookupContainer(name)
   868  	if err != nil {
   869  		pod, err = r.LookupPod(name)
   870  		if err != nil {
   871  			return nil, nil, err
   872  		}
   873  		podYAML, servicePorts, err = pod.GenerateForKube()
   874  	} else {
   875  		if len(container.Dependencies()) > 0 {
   876  			return nil, nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies")
   877  		}
   878  		podYAML, err = container.GenerateForKube()
   879  	}
   880  	if err != nil {
   881  		return nil, nil, err
   882  	}
   883  
   884  	if service {
   885  		serviceYAML = libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts)
   886  	}
   887  	return podYAML, &serviceYAML, nil
   888  }
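
        // A minimal usage sketch (assuming a valid *libpod.Runtime named runtime and
        // an existing pod or container named "mypod"):
        //
        //	podYAML, svcYAML, err := GenerateKube("mypod", true, runtime)
        //	if err != nil {
        //		return err
        //	}
        //	// svcYAML is only meaningful when the service argument is true.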
   889  
   890  // Parallelize provides the maximum number of parallel workers as calculated by a basic
   891  // heuristic. This can be overridden with the --max-workers global option to podman.
   892  func Parallelize(job string) int {
   893  	numCpus := runtime.NumCPU()
   894  	switch job {
   895  	case "kill":
   896  		if numCpus <= 3 {
   897  			return numCpus * 3
   898  		}
   899  		return numCpus * 4
   900  	case "pause":
   901  		if numCpus <= 3 {
   902  			return numCpus * 3
   903  		}
   904  		return numCpus * 4
   905  	case "ps":
   906  		return 8
   907  	case "restart":
   908  		return numCpus * 2
   909  	case "rm":
   910  		if numCpus <= 3 {
   911  			return numCpus * 3
   912  		} else {
   913  			return numCpus * 4
   914  		}
   915  	case "stop":
   916  		if numCpus <= 2 {
   917  			return 4
   918  		} else {
   919  			return numCpus * 3
   920  		}
   921  	case "unpause":
   922  		if numCpus <= 3 {
   923  			return numCpus * 3
   924  		}
   925  		return numCpus * 4
   926  	}
   927  	return 3
   928  }
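
        // For example, on a 4-CPU machine the heuristic above yields 16 workers for
        // "kill", 12 for "stop", 8 for "ps", and 3 for an unrecognized job name
        // (values traced from the switch above, not benchmarked recommendations).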