github.com/containers/podman/v2@v2.2.2-0.20210501105131-c1e07d070c4c/libpod/pod_api.go

package libpod

import (
	"context"

	"github.com/containers/podman/v2/libpod/define"
	"github.com/containers/podman/v2/libpod/events"
	"github.com/containers/podman/v2/pkg/cgroups"
	"github.com/containers/podman/v2/pkg/parallel"
	"github.com/containers/podman/v2/pkg/rootless"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Start starts all containers within a pod.
// It combines the effects of Init() and Start() on a container.
// If a container has already been initialized it will be started,
// otherwise it will be initialized then started.
// Containers that are already running or have been paused are ignored.
// All containers are started independently, in order dictated by their
// dependencies.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were started.
// If map is not nil, an error was encountered when starting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were started successfully.
func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Build a dependency graph of containers in the pod
	graph, err := BuildContainerGraph(allCtrs)
	if err != nil {
		return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// If there are no containers without dependencies, we can't start
	// Error out
	if len(graph.noDepNodes) == 0 {
		return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
	}

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error starting some containers")
	}
	defer p.newPodEvent(events.Start)
	return nil, nil
}
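
// startPodAndReport is a hypothetical usage sketch (not part of the upstream
// API) showing how a caller might interpret Start's two return values: a nil
// map with a non-nil error means nothing was started, while a non-nil map
// wraps define.ErrPodPartialFail and maps each failed container ID to its
// individual error.
func startPodAndReport(ctx context.Context, p *Pod) error {
	ctrErrs, err := p.Start(ctx)
	if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
		// Pod-level failure: no containers were started.
		return err
	}
	// Partial failure (or success, in which case the map is nil):
	// report each container that failed to start.
	for id, ctrErr := range ctrErrs {
		logrus.Errorf("container %s failed to start: %v", id, ctrErr)
	}
	return err
}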

// Stop stops all containers within a pod without a timeout.  It assumes -1 for
// a timeout.
func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
	return p.StopWithTimeout(ctx, cleanup, -1)
}

// StopWithTimeout stops all containers within a pod that are not already stopped
// Each container will use its own stop timeout.
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
// If cleanup is true, mounts and network namespaces will be cleaned up after
// the container is stopped.
// All containers are stopped independently. An error stopping one container
// will not prevent other containers being stopped.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were stopped.
// If map is not nil, an error was encountered when stopping one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were stopped without error.
func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// TODO: There may be cases where it makes sense to order stops based on
	// dependencies. Should we bother with this?

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to stop container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			// TODO: Might be better to batch stop and cleanup
			// together?
			if timeout > -1 {
				if err := c.StopWithTimeout(uint(timeout)); err != nil {
					return err
				}
			} else {
				if err := c.Stop(); err != nil {
					return err
				}
			}

			if cleanup {
				return c.Cleanup(ctx)
			}

			return nil
		})

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Stop)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
	}
	return nil, nil
}
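
// stopPodGracefully is a hypothetical usage sketch (not part of the upstream
// API): it stops every container in the pod with an assumed 10-second timeout,
// cleans up mounts and network namespaces afterwards, and logs any
// per-container failures reported via the ErrPodPartialFail map.
func stopPodGracefully(ctx context.Context, p *Pod) error {
	ctrErrs, err := p.StopWithTimeout(ctx, true, 10)
	for id, ctrErr := range ctrErrs {
		logrus.Errorf("container %s failed to stop: %v", id, ctrErr)
	}
	return err
}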

// Cleanup cleans up all containers within a pod that have stopped.
// All containers are cleaned up independently. An error with one container will
// not prevent other containers being cleaned up.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were cleaned up.
// If map is not nil, an error was encountered when working on one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were cleaned up without error.
func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to clean up container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			return c.Cleanup(ctx)
		})

		ctrErrChan[c.ID()] = retChan
	}

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
	}

	return nil, nil
}
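
// runOnAllContainers is a hypothetical sketch (not part of the upstream API)
// isolating the fan-out/fan-in pattern used by StopWithTimeout and Cleanup
// above (and by Pause, Unpause, and Kill below): each per-container operation
// is enqueued with the parallel executor, and the resulting error channels are
// drained into a single map[string]error keyed by container ID.
func runOnAllContainers(ctx context.Context, ctrs []*Container, op func(*Container) error) map[string]error {
	ctrErrChan := make(map[string]<-chan error)
	for _, ctr := range ctrs {
		c := ctr // capture the loop variable for the closure
		ctrErrChan[c.ID()] = parallel.Enqueue(ctx, func() error {
			return op(c)
		})
	}

	// Block on every channel; a nil receive means the operation succeeded.
	ctrErrors := make(map[string]error)
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			ctrErrors[id] = err
		}
	}
	return ctrErrors
}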

// Pause pauses all containers within a pod that are running.
// Only running containers will be paused. Paused, stopped, or created
// containers will be ignored.
// All containers are paused independently. An error pausing one container
// will not prevent other containers being paused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were paused.
// If map is not nil, an error was encountered when pausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were paused without error.
func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	if rootless.IsRootless() {
		cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
		if err != nil {
			return nil, errors.Wrap(err, "failed to determine cgroup version")
		}
		if !cgroupv2 {
			return nil, errors.Wrap(define.ErrNoCgroups, "cannot pause pods containing rootless containers with cgroup V1")
		}
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to pause container %s", c.ID())
		retChan := parallel.Enqueue(ctx, c.Pause)

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Pause)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error pausing some containers")
	}
	return nil, nil
}
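
// canPausePod is a hypothetical helper sketch (not part of the upstream API)
// mirroring the precondition Pause enforces above: rootless pods can only be
// paused on cgroup v2, since the freezer cgroup controls are not available to
// rootless users on cgroup v1.
func canPausePod() (bool, error) {
	if !rootless.IsRootless() {
		return true, nil
	}
	cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
	if err != nil {
		return false, err
	}
	return cgroupv2, nil
}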

// Unpause unpauses all containers within a pod that are paused.
// Only paused containers will be unpaused. Running, stopped, or created
// containers will be ignored.
// All containers are unpaused independently. An error unpausing one container
// will not prevent other containers being unpaused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were unpaused.
// If map is not nil, an error was encountered when unpausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were unpaused without error.
func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to unpause container %s", c.ID())
		retChan := parallel.Enqueue(ctx, c.Unpause)

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Unpause)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error unpausing some containers")
	}
	return nil, nil
}

// Restart restarts all containers within a pod that are not paused or in an error state.
// It combines the effects of Stop() and Start() on a container.
// Each container will use its own stop timeout.
// All containers are started independently, in order dictated by their
// dependencies. An error restarting one container
// will not prevent other containers being restarted.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were restarted.
// If map is not nil, an error was encountered when restarting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were restarted without error.
func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Build a dependency graph of containers in the pod
	graph, err := BuildContainerGraph(allCtrs)
	if err != nil {
		return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// If there are no containers without dependencies, we can't start
	// Error out
	if len(graph.noDepNodes) == 0 {
		return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
	}

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error restarting some containers")
	}
	p.newPodEvent(events.Stop)
	p.newPodEvent(events.Start)
	return nil, nil
}

// Kill sends a signal to all running containers within a pod.
// Signals will only be sent to running containers. Containers that are not
// running will be ignored. All signals are sent independently, and sending will
// continue even if some containers encounter errors.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were signalled.
// If map is not nil, an error was encountered when signalling one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were signalled successfully.
func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to kill container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			return c.Kill(signal)
		})

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Kill)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
	}
	return nil, nil
}
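
// killPodWithSIGTERM is a hypothetical usage sketch (not part of the upstream
// API) that sends SIGTERM to every running container in the pod; containers
// that are not running are skipped by Kill itself.
func killPodWithSIGTERM(ctx context.Context, p *Pod) error {
	const sigterm = uint(15) // numeric value of SIGTERM, avoiding a syscall import here
	ctrErrs, err := p.Kill(ctx, sigterm)
	for id, ctrErr := range ctrErrs {
		logrus.Errorf("container %s could not be signalled: %v", id, ctrErr)
	}
	return err
}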

// Status gets the status of all containers in the pod.
// Returns a map of Container ID to Container Status.
func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	return containerStatusFromContainers(allCtrs)
}

func containerStatusFromContainers(allCtrs []*Container) (map[string]define.ContainerStatus, error) {
	// We need to lock all the containers
	for _, ctr := range allCtrs {
		ctr.lock.Lock()
		defer ctr.lock.Unlock()
	}

	// Now that all containers are locked, get their status
	status := make(map[string]define.ContainerStatus, len(allCtrs))
	for _, ctr := range allCtrs {
		if err := ctr.syncContainer(); err != nil {
			return nil, err
		}

		status[ctr.ID()] = ctr.state.State
	}

	return status, nil
}
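
// countRunningContainers is a hypothetical usage sketch (not part of the
// upstream API) showing how a caller might consume Status(): the returned map
// is keyed by container ID, so counting running containers is a simple scan.
func countRunningContainers(p *Pod) (int, error) {
	statuses, err := p.Status()
	if err != nil {
		return 0, err
	}
	running := 0
	for _, state := range statuses {
		if state == define.ContainerStateRunning {
			running++
		}
	}
	return running, nil
}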

// Inspect returns an InspectPodData struct describing the pod.
func (p *Pod) Inspect() (*define.InspectPodData, error) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if err := p.updatePod(); err != nil {
		return nil, err
	}

	containers, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	ctrs := make([]define.InspectPodContainerInfo, 0, len(containers))
	ctrStatuses := make(map[string]define.ContainerStatus, len(containers))
	for _, c := range containers {
		containerStatus := "unknown"
		// Ignoring possible errors here because we don't want this to be
		// catastrophic in nature
		containerState, err := c.State()
		if err == nil {
			containerStatus = containerState.String()
		}
		ctrs = append(ctrs, define.InspectPodContainerInfo{
			ID:    c.ID(),
			Name:  c.Name(),
			State: containerStatus,
		})
		ctrStatuses[c.ID()] = c.state.State
	}
	podState, err := createPodStatusResults(ctrStatuses)
	if err != nil {
		return nil, err
	}

	namespaces := map[string]bool{
		"pid":    p.config.UsePodPID,
		"ipc":    p.config.UsePodIPC,
		"net":    p.config.UsePodNet,
		"mount":  p.config.UsePodMount,
		"user":   p.config.UsePodUser,
		"uts":    p.config.UsePodUTS,
		"cgroup": p.config.UsePodCgroupNS,
	}

	sharesNS := []string{}
	for nsStr, include := range namespaces {
		if include {
			sharesNS = append(sharesNS, nsStr)
		}
	}

	// Infra config contains detailed information on the pod's infra
	// container.
	var infraConfig *define.InspectPodInfraConfig
	if p.config.InfraContainer != nil && p.config.InfraContainer.HasInfraContainer {
		infraConfig = new(define.InspectPodInfraConfig)
		infraConfig.HostNetwork = p.config.InfraContainer.HostNetwork
		infraConfig.StaticIP = p.config.InfraContainer.StaticIP
		infraConfig.StaticMAC = p.config.InfraContainer.StaticMAC.String()
		infraConfig.NoManageResolvConf = p.config.InfraContainer.UseImageResolvConf
		infraConfig.NoManageHosts = p.config.InfraContainer.UseImageHosts

		if len(p.config.InfraContainer.DNSServer) > 0 {
			infraConfig.DNSServer = make([]string, 0, len(p.config.InfraContainer.DNSServer))
			infraConfig.DNSServer = append(infraConfig.DNSServer, p.config.InfraContainer.DNSServer...)
		}
		if len(p.config.InfraContainer.DNSSearch) > 0 {
			infraConfig.DNSSearch = make([]string, 0, len(p.config.InfraContainer.DNSSearch))
			infraConfig.DNSSearch = append(infraConfig.DNSSearch, p.config.InfraContainer.DNSSearch...)
		}
		if len(p.config.InfraContainer.DNSOption) > 0 {
			infraConfig.DNSOption = make([]string, 0, len(p.config.InfraContainer.DNSOption))
			infraConfig.DNSOption = append(infraConfig.DNSOption, p.config.InfraContainer.DNSOption...)
		}
		if len(p.config.InfraContainer.HostAdd) > 0 {
			infraConfig.HostAdd = make([]string, 0, len(p.config.InfraContainer.HostAdd))
			infraConfig.HostAdd = append(infraConfig.HostAdd, p.config.InfraContainer.HostAdd...)
		}
		if len(p.config.InfraContainer.Networks) > 0 {
			infraConfig.Networks = make([]string, 0, len(p.config.InfraContainer.Networks))
			infraConfig.Networks = append(infraConfig.Networks, p.config.InfraContainer.Networks...)
		}
		infraConfig.NetworkOptions = p.config.InfraContainer.NetworkOptions
		infraConfig.PortBindings = makeInspectPortBindings(p.config.InfraContainer.PortBindings)
	}

	inspectData := define.InspectPodData{
		ID:               p.ID(),
		Name:             p.Name(),
		Namespace:        p.Namespace(),
		Created:          p.CreatedTime(),
		CreateCommand:    p.config.CreateCommand,
		State:            podState,
		Hostname:         p.config.Hostname,
		Labels:           p.Labels(),
		CreateCgroup:     p.config.UsePodCgroup,
		CgroupParent:     p.CgroupParent(),
		CgroupPath:       p.state.CgroupPath,
		CreateInfra:      infraConfig != nil,
		InfraContainerID: p.state.InfraContainerID,
		InfraConfig:      infraConfig,
		SharedNamespaces: sharesNS,
		NumContainers:    uint(len(containers)),
		Containers:       ctrs,
	}

	return &inspectData, nil
}
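
// logPodSharedNamespaces is a hypothetical usage sketch (not part of the
// upstream API) showing how a caller might read Inspect's output: the
// SharedNamespaces slice lists which namespaces containers in the pod share,
// and InfraConfig is non-nil only when the pod has an infra container.
func logPodSharedNamespaces(p *Pod) error {
	data, err := p.Inspect()
	if err != nil {
		return err
	}
	logrus.Infof("pod %s shares namespaces: %v", data.Name, data.SharedNamespaces)
	if data.InfraConfig != nil {
		logrus.Infof("pod %s infra container uses host networking: %t", data.Name, data.InfraConfig.HostNetwork)
	}
	return nil
}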