github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/cmd/podman/shared/container.go

package shared

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/containers/image/v5/types"
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/libpod/define"
	"github.com/containers/libpod/libpod/image"
	"github.com/containers/libpod/pkg/timetype"
	"github.com/containers/libpod/pkg/util"
	"github.com/cri-o/ocicni/pkg/ocicni"
	"github.com/docker/go-units"
	"github.com/google/shlex"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
)

const (
	cidTruncLength = 12
	podTruncLength = 12
	iidTruncLength = 12
	cmdTruncLength = 17
)

// PsOptions describes the options accepted by the ps command.
type PsOptions struct {
	All       bool
	Format    string
	Last      int
	Latest    bool
	NoTrunc   bool
	Pod       bool
	Quiet     bool
	Size      bool
	Sort      string
	Namespace bool
	Sync      bool
}

// BatchContainerStruct is the return object from BatchContainer and contains
// container related information.
type BatchContainerStruct struct {
	ConConfig   *libpod.ContainerConfig
	ConState    define.ContainerStatus
	ExitCode    int32
	Exited      bool
	Pid         int
	StartedTime time.Time
	ExitedTime  time.Time
	Size        *ContainerSize
}

// PsContainerOutput is the struct returned from a parallel
// batch operation.
type PsContainerOutput struct {
	ID        string
	Image     string
	ImageID   string
	Command   string
	Created   string
	Ports     string
	Names     string
	IsInfra   bool
	Status    string
	State     define.ContainerStatus
	Pid       int
	Size      *ContainerSize
	Pod       string
	PodName   string
	CreatedAt time.Time
	ExitedAt  time.Time
	StartedAt time.Time
	Labels    map[string]string
	PID       string
	Cgroup    string
	IPC       string
	MNT       string
	NET       string
	PIDNS     string
	User      string
	UTS       string
	Mounts    string
}

// Namespace describes output for ps namespace.
type Namespace struct {
	PID    string `json:"pid,omitempty"`
	Cgroup string `json:"cgroup,omitempty"`
	IPC    string `json:"ipc,omitempty"`
	MNT    string `json:"mnt,omitempty"`
	NET    string `json:"net,omitempty"`
	PIDNS  string `json:"pidns,omitempty"`
	User   string `json:"user,omitempty"`
	UTS    string `json:"uts,omitempty"`
}

// ContainerSize holds the size of the container's root filesystem and top
// read-write layer.
type ContainerSize struct {
	RootFsSize int64 `json:"rootFsSize"`
	RwSize     int64 `json:"rwSize"`
}

// NewBatchContainer runs a batch process under one lock to get container
// information; it should only be called from PBatch.
func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
	var (
		conState  define.ContainerStatus
		command   string
		created   string
		status    string
		exitedAt  time.Time
		startedAt time.Time
		exitCode  int32
		err       error
		pid       int
		size      *ContainerSize
		ns        *Namespace
		pso       PsContainerOutput
	)
	batchErr := ctr.Batch(func(c *libpod.Container) error {
		if opts.Sync {
			if err := c.Sync(); err != nil {
				return err
			}
		}

		conState, err = c.State()
		if err != nil {
			return errors.Wrapf(err, "unable to obtain container state")
		}
		command = strings.Join(c.Command(), " ")
		created = units.HumanDuration(time.Since(c.CreatedTime())) + " ago"

		exitCode, _, err = c.ExitCode()
		if err != nil {
			return errors.Wrapf(err, "unable to obtain container exit code")
		}
		startedAt, err = c.StartedTime()
		if err != nil {
			logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
		}
		exitedAt, err = c.FinishedTime()
		if err != nil {
			logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
		}
		if opts.Namespace {
			pid, err = c.PID()
			if err != nil {
				return errors.Wrapf(err, "unable to obtain container pid")
			}
			ns = GetNamespaces(pid)
		}
		if opts.Size {
			size = new(ContainerSize)

			rootFsSize, err := c.RootFsSize()
			if err != nil {
				logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
			}

			rwSize, err := c.RWSize()
			if err != nil {
				logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
			}

			size.RootFsSize = rootFsSize
			size.RwSize = rwSize
		}

		return nil
	})

	if batchErr != nil {
		return pso, batchErr
	}

	switch conState.String() {
	case define.ContainerStateExited.String():
		fallthrough
	case define.ContainerStateStopped.String():
		exitedSince := units.HumanDuration(time.Since(exitedAt))
		status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince)
	case define.ContainerStateRunning.String():
		status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago"
	case define.ContainerStatePaused.String():
		status = "Paused"
	case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String():
		status = "Created"
	case define.ContainerStateRemoving.String():
		status = "Removing"
	default:
		status = "Error"
	}

	imageID, imageName := ctr.Image()
	cid := ctr.ID()
	podID := ctr.PodID()
	if !opts.NoTrunc {
		cid = cid[0:cidTruncLength]
		if len(podID) > podTruncLength {
			podID = podID[0:podTruncLength]
		}
		if len(command) > cmdTruncLength {
			command = command[0:cmdTruncLength] + "..."
		}
		if len(imageID) > iidTruncLength {
			imageID = imageID[0:iidTruncLength]
		}
	}

	ports, err := ctr.PortMappings()
	if err != nil {
		logrus.Errorf("unable to look up port mappings for %s", ctr.ID())
	}

	pso.ID = cid
	pso.Image = imageName
	pso.ImageID = imageID
	pso.Command = command
	pso.Created = created
	pso.Ports = portsToString(ports)
	pso.Names = ctr.Name()
	pso.IsInfra = ctr.IsInfra()
	pso.Status = status
	pso.State = conState
	pso.Pid = pid
	pso.Size = size
	pso.ExitedAt = exitedAt
	pso.CreatedAt = ctr.CreatedTime()
	pso.StartedAt = startedAt
	pso.Labels = ctr.Labels()
	pso.Mounts = strings.Join(ctr.UserVolumes(), " ")

	// Add pod name and pod ID if requested by user.
	// No need to look up the pod if its ID is empty.
	if opts.Pod && len(podID) > 0 {
		// The pod name is not in the container definition,
		// so we need to retrieve it using the pod ID.
		var podName string
		pod, err := r.LookupPod(podID)
		if err != nil {
			logrus.Errorf("unable to look up pod for container %s", ctr.ID())
		} else {
			podName = pod.Name()
		}

		pso.Pod = podID
		pso.PodName = podName
	}

	if opts.Namespace {
		pso.Cgroup = ns.Cgroup
		pso.IPC = ns.IPC
		pso.MNT = ns.MNT
		pso.NET = ns.NET
		pso.User = ns.User
		pso.UTS = ns.UTS
		pso.PIDNS = ns.PIDNS
	}

	return pso, nil
}
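
// A minimal usage sketch (illustrative only): "runtime" and "ctr" are
// hypothetical variables that a caller outside this package would already
// hold.
//
//	out, err := shared.NewBatchContainer(runtime, ctr, shared.PsOptions{Size: true})
//	if err != nil {
//		return err
//	}
//	fmt.Println(out.ID, out.Image, out.Status)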

type batchFunc func() (PsContainerOutput, error)

type workerInput struct {
	parallelFunc batchFunc
	opts         PsOptions
	cid          string
	job          int
}

// worker is a "threaded" worker that takes jobs from the jobs channel.
func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) {
	for j := range jobs {
		r, err := j.parallelFunc()
		// If we find an error, we return just the error.
		if err != nil {
			errors <- err
		} else {
			// Return the result.
			results <- r
		}
		wg.Done()
	}
}

// GenerateContainerFilterFuncs returns a ContainerFilter function based on the given filter.
func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
	switch filter {
	case "id":
		return func(c *libpod.Container) bool {
			return strings.Contains(c.ID(), filterValue)
		}, nil
	case "label":
		filterArray := strings.SplitN(filterValue, "=", 2)
		filterKey := filterArray[0]
		if len(filterArray) > 1 {
			filterValue = filterArray[1]
		} else {
			filterValue = ""
		}
		return func(c *libpod.Container) bool {
			for labelKey, labelValue := range c.Labels() {
				if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
					return true
				}
			}
			return false
		}, nil
	case "name":
		return func(c *libpod.Container) bool {
			match, err := regexp.MatchString(filterValue, c.Name())
			if err != nil {
				return false
			}
			return match
		}, nil
	case "exited":
		exitCode, err := strconv.ParseInt(filterValue, 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "exit code out of range %q", filterValue)
		}
		return func(c *libpod.Container) bool {
			ec, exited, err := c.ExitCode()
			return ec == int32(exitCode) && err == nil && exited
		}, nil
	case "status":
		if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
			return nil, errors.Errorf("%s is not a valid status", filterValue)
		}
		return func(c *libpod.Container) bool {
			status, err := c.State()
			if err != nil {
				return false
			}
			if filterValue == "stopped" {
				filterValue = "exited"
			}
			state := status.String()
			if status == define.ContainerStateConfigured {
				state = "created"
			} else if status == define.ContainerStateStopped {
				state = "exited"
			}
			return state == filterValue
		}, nil
	case "ancestor":
		// This needs to be refined to match docker's behavior:
		// - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>) - containers created from an image or a descendant.
		return func(c *libpod.Container) bool {
			containerConfig := c.Config()
			return strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue)
		}, nil
	case "before":
		ctr, err := r.LookupContainer(filterValue)
		if err != nil {
			return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
		}
		containerConfig := ctr.Config()
		createTime := containerConfig.CreatedTime
		return func(c *libpod.Container) bool {
			cc := c.Config()
			return createTime.After(cc.CreatedTime)
		}, nil
	case "since":
		ctr, err := r.LookupContainer(filterValue)
		if err != nil {
			return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
		}
		containerConfig := ctr.Config()
		createTime := containerConfig.CreatedTime
		return func(c *libpod.Container) bool {
			cc := c.Config()
			return createTime.Before(cc.CreatedTime)
		}, nil
	case "volume":
		// - volume=(<volume-name>|<mount-point-destination>)
		return func(c *libpod.Container) bool {
			containerConfig := c.Config()
			var dest string
			arr := strings.Split(filterValue, ":")
			source := arr[0]
			if len(arr) == 2 {
				dest = arr[1]
			}
			for _, mount := range containerConfig.Spec.Mounts {
				if dest != "" && (mount.Source == source && mount.Destination == dest) {
					return true
				}
				if dest == "" && mount.Source == source {
					return true
				}
			}
			return false
		}, nil
	case "health":
		return func(c *libpod.Container) bool {
			hcStatus, err := c.HealthCheckStatus()
			if err != nil {
				return false
			}
			return hcStatus == filterValue
		}, nil
	case "until":
		ts, err := timetype.GetTimestamp(filterValue, time.Now())
		if err != nil {
			return nil, err
		}
		seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
		if err != nil {
			return nil, err
		}
		until := time.Unix(seconds, nanoseconds)
		return func(c *libpod.Container) bool {
			return !until.IsZero() && c.CreatedTime().After(until)
		}, nil
	}
	return nil, errors.Errorf("%s is an invalid filter", filter)
}
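
// A minimal usage sketch (illustrative only): generated predicates can be
// handed to the runtime as container filters. "runtime" is a hypothetical
// *libpod.Runtime the caller already holds.
//
//	running, err := shared.GenerateContainerFilterFuncs("status", "running", runtime)
//	if err != nil {
//		return err
//	}
//	ctrs, err := runtime.GetContainers(running)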

// GetPsContainerOutput returns a slice of containers specifically for ps output.
func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) {
	var (
		filterFuncs      []libpod.ContainerFilter
		outputContainers []*libpod.Container
	)

	if len(filters) > 0 {
		for _, f := range filters {
			filterSplit := strings.SplitN(f, "=", 2)
			if len(filterSplit) < 2 {
				return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
			}
			generatedFunc, err := GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid filter")
			}
			filterFuncs = append(filterFuncs, generatedFunc)
		}
	}
	if !opts.Latest {
		// Get all containers.
		containers, err := r.GetContainers(filterFuncs...)
		if err != nil {
			return nil, err
		}

		// We only want the last few containers.
		if opts.Last > 0 && opts.Last <= len(containers) {
			return nil, errors.Errorf("--last not yet supported")
		}
		outputContainers = containers
	} else {
		// Get just the latest container.
		// Ignore filters.
		latestCtr, err := r.GetLatestContainer()
		if err != nil {
			return nil, err
		}

		outputContainers = []*libpod.Container{latestCtr}
	}

	pss := PBatch(r, outputContainers, maxWorkers, opts)
	return pss, nil
}

// PBatch performs batch operations on a set of containers in parallel. It
// spawns a number of workers relative to the number of parallel operations
// desired.
func PBatch(r *libpod.Runtime, containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
	var wg sync.WaitGroup
	psResults := []PsContainerOutput{}

	// If the number of containers in question is less than the number of
	// proposed parallel operations, we shouldn't spawn so many workers.
	if workers > len(containers) {
		workers = len(containers)
	}

	jobs := make(chan workerInput, len(containers))
	results := make(chan PsContainerOutput, len(containers))
	batchErrors := make(chan error, len(containers))

	// Create the workers.
	for w := 1; w <= workers; w++ {
		go worker(&wg, jobs, results, batchErrors)
	}

	// Add jobs to the workers.
	for i, j := range containers {
		j := j
		wg.Add(1)
		f := func() (PsContainerOutput, error) {
			return NewBatchContainer(r, j, opts)
		}
		jobs <- workerInput{
			parallelFunc: f,
			opts:         opts,
			cid:          j.ID(),
			job:          i,
		}
	}
	close(jobs)
	wg.Wait()
	close(results)
	close(batchErrors)
	for err := range batchErrors {
		logrus.Errorf("unable to get container info: %q", err)
	}
	for res := range results {
		// We sort out running vs non-running here to save lots of copying
		// later.
		if !opts.All && !opts.Latest && opts.Last < 1 {
			if !res.IsInfra && res.State == define.ContainerStateRunning {
				psResults = append(psResults, res)
			}
		} else {
			psResults = append(psResults, res)
		}
	}
	return psResults
}
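
// A minimal usage sketch (illustrative only): "runtime" is a hypothetical
// *libpod.Runtime and 8 is an arbitrary worker count.
//
//	ctrs, err := runtime.GetContainers()
//	if err != nil {
//		return err
//	}
//	for _, out := range shared.PBatch(runtime, ctrs, 8, shared.PsOptions{All: true}) {
//		fmt.Println(out.ID, out.Names, out.Status)
//	}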

// BatchContainerOp is used in ps to reduce performance hits by "batching"
// locks.
func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
	var (
		conConfig   *libpod.ContainerConfig
		conState    define.ContainerStatus
		err         error
		exitCode    int32
		exited      bool
		pid         int
		size        *ContainerSize
		startedTime time.Time
		exitedTime  time.Time
	)

	batchErr := ctr.Batch(func(c *libpod.Container) error {
		conConfig = c.Config()
		conState, err = c.State()
		if err != nil {
			return errors.Wrapf(err, "unable to obtain container state")
		}

		exitCode, exited, err = c.ExitCode()
		if err != nil {
			return errors.Wrapf(err, "unable to obtain container exit code")
		}
		startedTime, err = c.StartedTime()
		if err != nil {
			logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
		}
		exitedTime, err = c.FinishedTime()
		if err != nil {
			logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
		}

		if !opts.Size && !opts.Namespace {
			return nil
		}

		if opts.Namespace {
			pid, err = c.PID()
			if err != nil {
				return errors.Wrapf(err, "unable to obtain container pid")
			}
		}
		if opts.Size {
			size = new(ContainerSize)

			rootFsSize, err := c.RootFsSize()
			if err != nil {
				logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
			}

			rwSize, err := c.RWSize()
			if err != nil {
				logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
			}

			size.RootFsSize = rootFsSize
			size.RwSize = rwSize
		}
		return nil
	})
	if batchErr != nil {
		return BatchContainerStruct{}, batchErr
	}
	return BatchContainerStruct{
		ConConfig:   conConfig,
		ConState:    conState,
		ExitCode:    exitCode,
		Exited:      exited,
		Pid:         pid,
		StartedTime: startedTime,
		ExitedTime:  exitedTime,
		Size:        size,
	}, nil
}

// GetNamespaces returns a populated namespace struct.
func GetNamespaces(pid int) *Namespace {
	ctrPID := strconv.Itoa(pid)
	cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup"))
	ipc, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "ipc"))
	mnt, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "mnt"))
	net, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "net"))
	pidns, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "pid"))
	user, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "user"))
	uts, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "uts"))

	return &Namespace{
		PID:    ctrPID,
		Cgroup: cgroup,
		IPC:    ipc,
		MNT:    mnt,
		NET:    net,
		PIDNS:  pidns,
		User:   user,
		UTS:    uts,
	}
}

// GetNamespaceInfo is an exported wrapper for getNamespaceInfo.
func GetNamespaceInfo(path string) (string, error) {
	return getNamespaceInfo(path)
}

func getNamespaceInfo(path string) (string, error) {
	val, err := os.Readlink(path)
	if err != nil {
		return "", errors.Wrapf(err, "error getting info from %q", path)
	}
	return getStrFromSquareBrackets(val), nil
}

// getStrFromSquareBrackets gets the string inside [] from a string.
func getStrFromSquareBrackets(cmd string) string {
	reg := regexp.MustCompile(`.*\[|\].*`)
	arr := strings.Split(reg.ReplaceAllLiteralString(cmd, ""), ",")
	return strings.Join(arr, ",")
}
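
// For example (illustrative), a /proc/<pid>/ns readlink value such as
// "mnt:[4026531840]" yields the namespace inode number:
//
//	getStrFromSquareBrackets("mnt:[4026531840]") // "4026531840"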

func comparePorts(i, j ocicni.PortMapping) bool {
	if i.ContainerPort != j.ContainerPort {
		return i.ContainerPort < j.ContainerPort
	}

	if i.HostIP != j.HostIP {
		return i.HostIP < j.HostIP
	}

	if i.HostPort != j.HostPort {
		return i.HostPort < j.HostPort
	}

	return i.Protocol < j.Protocol
}

// formatGroup returns the group as <IP:startPort-lastPort->startPort-lastPort/Proto>,
// e.g. 0.0.0.0:1000-1006->1000-1006/tcp.
func formatGroup(key string, start, last int32) string {
	parts := strings.Split(key, "/")
	groupType := parts[0]
	var ip string
	if len(parts) > 1 {
		ip = parts[0]
		groupType = parts[1]
	}
	group := strconv.Itoa(int(start))
	if start != last {
		group = fmt.Sprintf("%s-%d", group, last)
	}
	if ip != "" {
		group = fmt.Sprintf("%s:%s->%s", ip, group, group)
	}
	return fmt.Sprintf("%s/%s", group, groupType)
}
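
// For example (illustrative):
//
//	formatGroup("0.0.0.0/tcp", 1000, 1006) // "0.0.0.0:1000-1006->1000-1006/tcp"
//	formatGroup("tcp", 80, 80)             // "80/tcp"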

// portsToString converts the ports used to a string of the form "port1, port2"
// and also groups a continuous list of ports into a readable format.
func portsToString(ports []ocicni.PortMapping) string {
	type portGroup struct {
		first int32
		last  int32
	}
	var portDisplay []string
	if len(ports) == 0 {
		return ""
	}
	// Sort the ports so that grouping continuous ports becomes easy.
	sort.Slice(ports, func(i, j int) bool {
		return comparePorts(ports[i], ports[j])
	})

	// portGroupMap is used for grouping continuous ports.
	portGroupMap := make(map[string]*portGroup)
	var groupKeyList []string

	for _, v := range ports {

		hostIP := v.HostIP
		if hostIP == "" {
			hostIP = "0.0.0.0"
		}
		// If hostPort and containerPort are not the same, treat it as an individual port.
		if v.ContainerPort != v.HostPort {
			portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol))
			continue
		}

		portMapKey := fmt.Sprintf("%s/%s", hostIP, v.Protocol)

		portgroup, ok := portGroupMap[portMapKey]
		if !ok {
			portGroupMap[portMapKey] = &portGroup{first: v.ContainerPort, last: v.ContainerPort}
			// This list is required to traverse portGroupMap.
			groupKeyList = append(groupKeyList, portMapKey)
			continue
		}

		if portgroup.last == (v.ContainerPort - 1) {
			portgroup.last = v.ContainerPort
			continue
		}
		// The port is not continuous with the current group: flush the
		// existing group to the display list and start a new one, so that
		// non-adjacent ports on the same IP/protocol are not silently dropped.
		portDisplay = append(portDisplay, formatGroup(portMapKey, portgroup.first, portgroup.last))
		portgroup.first = v.ContainerPort
		portgroup.last = v.ContainerPort
	}
	// For each portMapKey, format the group and append it to the output string.
	for _, portKey := range groupKeyList {
		group := portGroupMap[portKey]
		portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last))
	}
	return strings.Join(portDisplay, ", ")
}
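
// For example (illustrative): host ports 80-82/tcp published to the same
// container ports render as "0.0.0.0:80-82->80-82/tcp", while a mapping of
// host port 8080 to container port 80 renders as "0.0.0.0:8080->80/tcp".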

// GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the
// construction of the runlabel output and environment variables.
func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) {
	var (
		newImage  *image.Image
		err       error
		imageName string
	)
	if pull {
		var registryCreds *types.DockerAuthConfig
		if inputCreds != "" {
			creds, err := util.ParseRegistryCreds(inputCreds)
			if err != nil {
				return "", "", err
			}
			registryCreds = creds
		}
		dockerRegistryOptions.DockerRegistryCreds = registryCreds
		newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, signaturePolicyPath, authfile, output, &dockerRegistryOptions, image.SigningOptions{}, &label, util.PullImageMissing)
	} else {
		newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage)
	}
	if err != nil {
		return "", "", errors.Wrapf(err, "unable to find image")
	}

	if len(newImage.Names()) < 1 {
		imageName = newImage.ID()
	} else {
		imageName = newImage.Names()[0]
	}

	runLabel, err := newImage.GetLabel(ctx, label)
	return runLabel, imageName, err
}

// GenerateRunlabelCommand generates the command that will eventually be executed by Podman.
func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
	// If no name is provided, we use the image's basename instead.
	if name == "" {
		baseName, err := image.GetImageBaseName(imageName)
		if err != nil {
			return nil, nil, err
		}
		name = baseName
	}
	// The user provided extra arguments that need to be tacked onto the label's command.
	if len(extraArgs) > 0 {
		runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " "))
	}
	cmd, err := GenerateCommand(runLabel, imageName, name, globalOpts)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "unable to generate command")
	}
	env := GenerateRunEnvironment(name, imageName, opts)
	env = append(env, "PODMAN_RUNLABEL_NESTED=1")

	envmap := envSliceToMap(env)

	envmapper := func(k string) string {
		switch k {
		case "OPT1":
			return envmap["OPT1"]
		case "OPT2":
			return envmap["OPT2"]
		case "OPT3":
			return envmap["OPT3"]
		case "PWD":
			// We would prefer to use os.Getenv, but it appears PWD is not in the os env list.
			d, err := os.Getwd()
			if err != nil {
				logrus.Error("unable to determine current working directory")
				return ""
			}
			return d
		}
		return ""
	}
	newS := os.Expand(strings.Join(cmd, " "), envmapper)
	cmd, err = shlex.Split(newS)
	if err != nil {
		return nil, nil, err
	}
	return cmd, env, nil
}

func envSliceToMap(env []string) map[string]string {
	m := make(map[string]string)
	for _, i := range env {
		// Split on the first "=" only, so that values which themselves
		// contain "=" are preserved intact.
		split := strings.SplitN(i, "=", 2)
		if len(split) < 2 {
			m[split[0]] = ""
			continue
		}
		m[split[0]] = split[1]
	}
	return m
}
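
// For example (illustrative):
//
//	envSliceToMap([]string{"OPT1=foo", "OPT2=a=b"})
//	// map[OPT1:foo OPT2:a=b]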

// GenerateKube generates Kubernetes YAML based on a pod or container.
func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Service, error) {
	var (
		pod          *libpod.Pod
		podYAML      *v1.Pod
		err          error
		container    *libpod.Container
		servicePorts []v1.ServicePort
		serviceYAML  v1.Service
	)
	// Get the container in question.
	container, err = r.LookupContainer(name)
	if err != nil {
		pod, err = r.LookupPod(name)
		if err != nil {
			return nil, nil, err
		}
		podYAML, servicePorts, err = pod.GenerateForKube()
	} else {
		if len(container.Dependencies()) > 0 {
			return nil, nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies")
		}
		podYAML, err = container.GenerateForKube()
	}
	if err != nil {
		return nil, nil, err
	}

	if service {
		serviceYAML = libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts)
	}
	return podYAML, &serviceYAML, nil
}
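
// A minimal usage sketch (illustrative only): "runtime" is a hypothetical
// *libpod.Runtime, and the YAML marshaling library is an assumption (e.g.
// github.com/ghodss/yaml), not something this file depends on.
//
//	podYAML, serviceYAML, err := shared.GenerateKube("mypod", true, runtime)
//	if err != nil {
//		return err
//	}
//	b, err := yaml.Marshal(podYAML)
//	if err != nil {
//		return err
//	}
//	fmt.Println(string(b))
//	_ = serviceYAML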