github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/runsc/boot/controller.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package boot
    16  
    17  import (
    18  	"errors"
    19  	"fmt"
    20  	"os"
    21  	gtime "time"
    22  
     23  	"github.com/nicocha30/gvisor-ligolo/pkg/control/server"
     24  	"github.com/nicocha30/gvisor-ligolo/pkg/fd"
     25  	"github.com/nicocha30/gvisor-ligolo/pkg/log"
     26  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/control"
     27  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/kernel"
     28  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/seccheck"
     29  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/socket/netstack"
     30  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/state"
     31  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/time"
     32  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/vfs"
     33  	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/watchdog"
     34  	"github.com/nicocha30/gvisor-ligolo/pkg/tcpip/stack"
     35  	"github.com/nicocha30/gvisor-ligolo/pkg/urpc"
     36  	"github.com/nicocha30/gvisor-ligolo/runsc/boot/pprof"
     37  	"github.com/nicocha30/gvisor-ligolo/runsc/boot/procfs"
     38  	"github.com/nicocha30/gvisor-ligolo/runsc/config"
     39  	"github.com/nicocha30/gvisor-ligolo/runsc/specutils"
     40  	specs "github.com/opencontainers/runtime-spec/specs-go"
     41  	"golang.org/x/sys/unix"
    42  )
    43  
    44  const (
    45  	// ContMgrCheckpoint checkpoints a container.
    46  	ContMgrCheckpoint = "containerManager.Checkpoint"
    47  
    48  	// ContMgrCreateSubcontainer creates a sub-container.
    49  	ContMgrCreateSubcontainer = "containerManager.CreateSubcontainer"
    50  
    51  	// ContMgrDestroySubcontainer is used to stop a sub-container and free all
    52  	// associated resources in the sandbox.
    53  	ContMgrDestroySubcontainer = "containerManager.DestroySubcontainer"
    54  
     55  	// ContMgrEvent gets stats about a container; it is used by "runsc events".
    56  	ContMgrEvent = "containerManager.Event"
    57  
    58  	// ContMgrExecuteAsync executes a command in a container.
    59  	ContMgrExecuteAsync = "containerManager.ExecuteAsync"
    60  
    61  	// ContMgrPortForward starts port forwarding with the sandbox.
    62  	ContMgrPortForward = "containerManager.PortForward"
    63  
    64  	// ContMgrProcesses lists processes running in a container.
    65  	ContMgrProcesses = "containerManager.Processes"
    66  
    67  	// ContMgrRestore restores a container from a statefile.
    68  	ContMgrRestore = "containerManager.Restore"
    69  
    70  	// ContMgrSignal sends a signal to a container.
    71  	ContMgrSignal = "containerManager.Signal"
    72  
    73  	// ContMgrStartSubcontainer starts a sub-container inside a running sandbox.
    74  	ContMgrStartSubcontainer = "containerManager.StartSubcontainer"
    75  
    76  	// ContMgrWait waits on the init process of the container and returns its
    77  	// ExitStatus.
    78  	ContMgrWait = "containerManager.Wait"
    79  
    80  	// ContMgrWaitPID waits on a process with a certain PID in the sandbox and
    81  	// return its ExitStatus.
     82  	// returns its ExitStatus.
    83  
    84  	// ContMgrRootContainerStart starts a new sandbox with a root container.
    85  	ContMgrRootContainerStart = "containerManager.StartRoot"
    86  
    87  	// ContMgrCreateTraceSession starts a trace session.
    88  	ContMgrCreateTraceSession = "containerManager.CreateTraceSession"
    89  
    90  	// ContMgrDeleteTraceSession deletes a trace session.
    91  	ContMgrDeleteTraceSession = "containerManager.DeleteTraceSession"
    92  
     93  	// ContMgrListTraceSessions lists trace sessions.
    94  	ContMgrListTraceSessions = "containerManager.ListTraceSessions"
    95  
    96  	// ContMgrProcfsDump dumps sandbox procfs state.
    97  	ContMgrProcfsDump = "containerManager.ProcfsDump"
    98  )
    99  
   100  const (
   101  	// NetworkCreateLinksAndRoutes creates links and routes in a network stack.
   102  	NetworkCreateLinksAndRoutes = "Network.CreateLinksAndRoutes"
   103  
   104  	// DebugStacks collects sandbox stacks for debugging.
   105  	DebugStacks = "debug.Stacks"
   106  )
   107  
   108  // Profiling related commands (see pprof.go for more details).
   109  const (
   110  	ProfileCPU   = "Profile.CPU"
   111  	ProfileHeap  = "Profile.Heap"
   112  	ProfileBlock = "Profile.Block"
   113  	ProfileMutex = "Profile.Mutex"
   114  	ProfileTrace = "Profile.Trace"
   115  )
   116  
   117  // Logging related commands (see logging.go for more details).
   118  const (
   119  	LoggingChange = "Logging.Change"
   120  )
   121  
   122  // Lifecycle related commands (see lifecycle.go for more details).
   123  const (
   124  	LifecyclePause  = "Lifecycle.Pause"
   125  	LifecycleResume = "Lifecycle.Resume"
   126  )
   127  
   128  // Usage related commands (see usage.go for more details).
   129  const (
   130  	UsageCollect = "Usage.Collect"
   131  	UsageUsageFD = "Usage.UsageFD"
   132  )
   133  
   134  // Metrics related commands (see metrics.go).
   135  const (
   136  	MetricsGetRegistered = "Metrics.GetRegisteredMetrics"
   137  	MetricsExport        = "Metrics.Export"
   138  )
   139  
   140  // Commands for interacting with cgroupfs within the sandbox.
   141  const (
   142  	CgroupsReadControlFiles  = "Cgroups.ReadControlFiles"
   143  	CgroupsWriteControlFiles = "Cgroups.WriteControlFiles"
   144  )
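
         // The string constants above are the urpc method names that a control
         // client passes to urpc.Client.Call. A minimal client-side sketch, under
         // the assumption that the caller connects with pkg/control/client and
         // already knows the controller's socket path (both live outside this
         // file; waitInit and its parameters are purely illustrative):
         //
         //	// waitInit blocks until the container's init process exits and
         //	// returns its wait status.
         //	func waitInit(socketPath, cid string) (uint32, error) {
         //		conn, err := client.ConnectTo(socketPath) // *urpc.Client
         //		if err != nil {
         //			return 0, err
         //		}
         //		defer conn.Close()
         //		var waitStatus uint32
         //		if err := conn.Call(ContMgrWait, &cid, &waitStatus); err != nil {
         //			return 0, err
         //		}
         //		return waitStatus, nil
         //	}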
   145  
   146  // controller holds the control server, and is used for communication into the
   147  // sandbox.
   148  type controller struct {
   149  	// srv is the control server.
   150  	srv *server.Server
   151  
   152  	// manager holds the containerManager methods.
   153  	manager *containerManager
   154  }
   155  
   156  // newController creates a new controller. The caller must call
   157  // controller.srv.StartServing() to start the controller.
   158  func newController(fd int, l *Loader) (*controller, error) {
   159  	srv, err := server.CreateFromFD(fd)
   160  	if err != nil {
   161  		return nil, err
   162  	}
   163  
   164  	ctrl := &controller{
   165  		manager: &containerManager{
   166  			startChan:       make(chan struct{}),
   167  			startResultChan: make(chan error),
   168  			l:               l,
   169  		},
   170  		srv: srv,
   171  	}
   172  	ctrl.srv.Register(ctrl.manager)
   173  	ctrl.srv.Register(&control.Cgroups{Kernel: l.k})
   174  	ctrl.srv.Register(&control.Lifecycle{Kernel: l.k})
   175  	ctrl.srv.Register(&control.Logging{})
   176  	ctrl.srv.Register(&control.Proc{Kernel: l.k})
   177  	ctrl.srv.Register(&control.State{Kernel: l.k})
   178  	ctrl.srv.Register(&control.Usage{Kernel: l.k})
   179  	ctrl.srv.Register(&control.Metrics{})
   180  	ctrl.srv.Register(&debug{})
   181  
   182  	if eps, ok := l.k.RootNetworkNamespace().Stack().(*netstack.Stack); ok {
   183  		ctrl.srv.Register(&Network{Stack: eps.Stack})
   184  	}
   185  	if l.root.conf.ProfileEnable {
   186  		ctrl.srv.Register(control.NewProfile(l.k))
   187  	}
   188  	return ctrl, nil
   189  }
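
         // A hedged sketch of the lifecycle described above; controlFD and loader
         // are placeholders for values created elsewhere in runsc:
         //
         //	ctrl, err := newController(controlFD, loader)
         //	if err != nil {
         //		return err
         //	}
         //	if err := ctrl.srv.StartServing(); err != nil {
         //		return err
         //	}
         //	// ... the sandbox runs and serves control RPCs ...
         //	ctrl.stop()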
   190  
   191  // stopRPCTimeout is the time for clients to complete ongoing RPCs.
   192  const stopRPCTimeout = 15 * gtime.Second
   193  
   194  func (c *controller) stop() {
   195  	c.srv.Stop(stopRPCTimeout)
   196  }
   197  
   198  // containerManager manages sandbox containers.
   199  type containerManager struct {
   200  	// startChan is used to signal when the root container process should
   201  	// be started.
   202  	startChan chan struct{}
   203  
    204  	// startResultChan is used to signal when the root container has
   205  	// started. Any errors encountered during startup will be sent to the
   206  	// channel. A nil value indicates success.
   207  	startResultChan chan error
   208  
   209  	// l is the loader that creates containers and sandboxes.
   210  	l *Loader
   211  }
   212  
   213  // StartRoot will start the root container process.
   214  func (cm *containerManager) StartRoot(cid *string, _ *struct{}) error {
   215  	log.Debugf("containerManager.StartRoot, cid: %s", *cid)
   216  	// Tell the root container to start and wait for the result.
   217  	cm.startChan <- struct{}{}
   218  	if err := <-cm.startResultChan; err != nil {
   219  		return fmt.Errorf("starting sandbox: %v", err)
   220  	}
   221  	return nil
   222  }
   223  
   224  // Processes retrieves information about processes running in the sandbox.
   225  func (cm *containerManager) Processes(cid *string, out *[]*control.Process) error {
   226  	log.Debugf("containerManager.Processes, cid: %s", *cid)
   227  	return control.Processes(cm.l.k, *cid, out)
   228  }
   229  
    230  // CreateArgs contains arguments to the CreateSubcontainer method.
   231  type CreateArgs struct {
   232  	// CID is the ID of the container to start.
   233  	CID string
   234  
   235  	// FilePayload may contain a TTY file for the terminal, if enabled.
   236  	urpc.FilePayload
   237  }
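
         // A client-side sketch, hedged: when the container has a terminal, the TTY
         // is the single donated file; otherwise no files are donated. conn is a
         // *urpc.Client as in the sketch near the method-name constants.
         //
         //	args := CreateArgs{
         //		CID:         cid,
         //		FilePayload: urpc.FilePayload{Files: []*os.File{ttyFile}},
         //	}
         //	if err := conn.Call(ContMgrCreateSubcontainer, &args, nil); err != nil {
         //		return err
         //	}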
   238  
   239  // CreateSubcontainer creates a container within a sandbox.
   240  func (cm *containerManager) CreateSubcontainer(args *CreateArgs, _ *struct{}) error {
   241  	log.Debugf("containerManager.CreateSubcontainer: %s", args.CID)
   242  
   243  	if len(args.Files) > 1 {
    244  		return fmt.Errorf("start arguments must have at most 1 file for the TTY")
   245  	}
   246  	var tty *fd.FD
   247  	if len(args.Files) == 1 {
   248  		var err error
   249  		tty, err = fd.NewFromFile(args.Files[0])
   250  		if err != nil {
   251  			return fmt.Errorf("error dup'ing TTY file: %w", err)
   252  		}
   253  	}
   254  	return cm.l.createSubcontainer(args.CID, tty)
   255  }
   256  
    257  // StartArgs contains arguments to the StartSubcontainer method.
   258  type StartArgs struct {
   259  	// Spec is the spec of the container to start.
   260  	Spec *specs.Spec
   261  
   262  	// Config is the runsc-specific configuration for the sandbox.
   263  	Conf *config.Config
   264  
   265  	// CID is the ID of the container to start.
   266  	CID string
   267  
   268  	// NumOverlayFilestoreFDs is the number of overlay filestore FDs donated.
   269  	// Optionally configured with the overlay2 flag.
   270  	NumOverlayFilestoreFDs int
   271  
   272  	// OverlayMediums contains information about how the gofer mounts have been
   273  	// overlaid. The first entry is for rootfs and the following entries are for
   274  	// bind mounts in Spec.Mounts (in the same order).
   275  	OverlayMediums []OverlayMedium
   276  
   277  	// FilePayload contains, in order:
   278  	//   * stdin, stdout, and stderr (optional: if terminal is disabled).
   279  	//   * file descriptors to overlay-backing host files (optional: for overlay2).
   280  	//   * file descriptors to connect to gofer to serve the root filesystem.
   281  	urpc.FilePayload
   282  }
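
         // A sketch of how a caller is expected to order the donated files for a
         // container without a terminal (variable names are illustrative; the real
         // assembly happens in the runsc container and sandbox packages):
         //
         //	files := []*os.File{stdin, stdout, stderr}       // omitted when Terminal is true
         //	files = append(files, overlayFilestoreFiles...)  // NumOverlayFilestoreFDs entries
         //	files = append(files, goferConnFiles...)         // root filesystem connection(s)
         //	args := StartArgs{
         //		Spec:                   spec,
         //		Conf:                   conf,
         //		CID:                    cid,
         //		NumOverlayFilestoreFDs: len(overlayFilestoreFiles),
         //		OverlayMediums:         overlayMediums,
         //		FilePayload:            urpc.FilePayload{Files: files},
         //	}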
   283  
   284  // StartSubcontainer runs a created container within a sandbox.
   285  func (cm *containerManager) StartSubcontainer(args *StartArgs, _ *struct{}) error {
   286  	// Validate arguments.
   287  	if args == nil {
   288  		return errors.New("start missing arguments")
   289  	}
   290  	log.Debugf("containerManager.StartSubcontainer, cid: %s, args: %+v", args.CID, args)
   291  	if args.Spec == nil {
   292  		return errors.New("start arguments missing spec")
   293  	}
   294  	if args.Conf == nil {
   295  		return errors.New("start arguments missing config")
   296  	}
   297  	if args.CID == "" {
   298  		return errors.New("start argument missing container ID")
   299  	}
   300  	expectedFDs := 1 // At least one FD for the root filesystem.
   301  	expectedFDs += args.NumOverlayFilestoreFDs
   302  	if !args.Spec.Process.Terminal {
   303  		expectedFDs += 3
   304  	}
   305  	if len(args.Files) < expectedFDs {
   306  		return fmt.Errorf("start arguments must contain at least %d FDs, but only got %d", expectedFDs, len(args.Files))
   307  	}
   308  
    309  	// All validation passed; log the spec for debugging.
   310  	specutils.LogSpecDebug(args.Spec, args.Conf.OCISeccomp)
   311  
   312  	goferFiles := args.Files
   313  	var stdios []*fd.FD
   314  	if !args.Spec.Process.Terminal {
   315  		// When not using a terminal, stdios come as the first 3 files in the
   316  		// payload.
   317  		var err error
   318  		stdios, err = fd.NewFromFiles(goferFiles[:3])
   319  		if err != nil {
   320  			return fmt.Errorf("error dup'ing stdio files: %w", err)
   321  		}
   322  		goferFiles = goferFiles[3:]
   323  	}
   324  	defer func() {
   325  		for _, fd := range stdios {
   326  			_ = fd.Close()
   327  		}
   328  	}()
   329  
   330  	var overlayFilestoreFDs []*fd.FD
   331  	for i := 0; i < args.NumOverlayFilestoreFDs; i++ {
   332  		overlayFilestoreFD, err := fd.NewFromFile(goferFiles[i])
   333  		if err != nil {
   334  			return fmt.Errorf("error dup'ing overlay filestore file: %w", err)
   335  		}
   336  		overlayFilestoreFDs = append(overlayFilestoreFDs, overlayFilestoreFD)
   337  	}
   338  	goferFiles = goferFiles[args.NumOverlayFilestoreFDs:]
   339  
   340  	goferFDs, err := fd.NewFromFiles(goferFiles)
   341  	if err != nil {
   342  		return fmt.Errorf("error dup'ing gofer files: %w", err)
   343  	}
   344  	defer func() {
   345  		for _, fd := range goferFDs {
   346  			_ = fd.Close()
   347  		}
   348  	}()
   349  
   350  	if err := cm.l.startSubcontainer(args.Spec, args.Conf, args.CID, stdios, goferFDs, overlayFilestoreFDs, args.OverlayMediums); err != nil {
   351  		log.Debugf("containerManager.StartSubcontainer failed, cid: %s, args: %+v, err: %v", args.CID, args, err)
   352  		return err
   353  	}
   354  	log.Debugf("Container started, cid: %s", args.CID)
   355  	return nil
   356  }
   357  
   358  // DestroySubcontainer stops a container if it is still running and cleans up
   359  // its filesystem.
   360  func (cm *containerManager) DestroySubcontainer(cid *string, _ *struct{}) error {
   361  	log.Debugf("containerManager.DestroySubcontainer, cid: %s", *cid)
   362  	return cm.l.destroySubcontainer(*cid)
   363  }
   364  
   365  // ExecuteAsync starts running a command on a created or running sandbox. It
   366  // returns the PID of the new process.
   367  func (cm *containerManager) ExecuteAsync(args *control.ExecArgs, pid *int32) error {
   368  	log.Debugf("containerManager.ExecuteAsync, cid: %s, args: %+v", args.ContainerID, args)
   369  	tgid, err := cm.l.executeAsync(args)
   370  	if err != nil {
   371  		log.Debugf("containerManager.ExecuteAsync failed, cid: %s, args: %+v, err: %v", args.ContainerID, args, err)
   372  		return err
   373  	}
   374  	*pid = int32(tgid)
   375  	return nil
   376  }
   377  
   378  // Checkpoint pauses a sandbox and saves its state.
   379  func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {
   380  	log.Debugf("containerManager.Checkpoint")
   381  	// TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet
   382  	if cm.l.root.conf.Network == config.NetworkHost {
   383  		return errors.New("checkpoint not supported when using hostinet")
   384  	}
   385  
   386  	state := control.State{
   387  		Kernel:   cm.l.k,
   388  		Watchdog: cm.l.watchdog,
   389  	}
   390  	return state.Save(o, nil)
   391  }
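
         // A hedged client-side sketch: the destination image file is donated
         // through SaveOpts' embedded FilePayload; no other SaveOpts fields are
         // assumed here, and conn is a *urpc.Client as in the earlier sketches.
         //
         //	opts := control.SaveOpts{
         //		FilePayload: urpc.FilePayload{Files: []*os.File{imageFile}},
         //	}
         //	if err := conn.Call(ContMgrCheckpoint, &opts, nil); err != nil {
         //		return err
         //	}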
   392  
   393  // PortForwardOpts contains options for port forwarding to a port in a
   394  // container.
   395  type PortForwardOpts struct {
   396  	// FilePayload contains one fd for a UDS (or local port) used for port
   397  	// forwarding.
   398  	urpc.FilePayload
   399  
    400  	// ContainerID is the container whose port will be forwarded.
    401  	ContainerID string
    402  	// Port is the port to forward.
   403  	Port uint16
   404  }
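
         // A hedged caller-side sketch: one end of a connected socket pair is
         // donated, and the sandbox proxies traffic between it and the container
         // port.
         //
         //	opts := PortForwardOpts{
         //		FilePayload: urpc.FilePayload{Files: []*os.File{localSock}},
         //		ContainerID: cid,
         //		Port:        8080,
         //	}
         //	if err := conn.Call(ContMgrPortForward, &opts, nil); err != nil {
         //		return err
         //	}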
   405  
   406  // PortForward initiates a port forward to the container.
   407  func (cm *containerManager) PortForward(opts *PortForwardOpts, _ *struct{}) error {
   408  	log.Debugf("containerManager.PortForward, cid: %s, port: %d", opts.ContainerID, opts.Port)
   409  	if err := cm.l.portForward(opts); err != nil {
   410  		log.Debugf("containerManager.PortForward failed, opts: %+v, err: %v", opts, err)
   411  		return err
   412  	}
   413  	return nil
   414  }
   415  
   416  // RestoreOpts contains options related to restoring a container's file system.
   417  type RestoreOpts struct {
   418  	// FilePayload contains the state file to be restored, followed by the
   419  	// platform device file if necessary.
   420  	urpc.FilePayload
   421  
   422  	// SandboxID contains the ID of the sandbox.
   423  	SandboxID string
   424  }
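
         // A sketch of the matching client-side call (conn as in the earlier
         // sketches): the state file is always Files[0]; a platform device file is
         // appended only when the platform needs one.
         //
         //	opts := RestoreOpts{
         //		FilePayload: urpc.FilePayload{Files: []*os.File{stateFile, deviceFile}},
         //		SandboxID:   sandboxID,
         //	}
         //	if err := conn.Call(ContMgrRestore, &opts, nil); err != nil {
         //		return err
         //	}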
   425  
   426  // Restore loads a container from a statefile.
   427  // The container's current kernel is destroyed, a restore environment is
    428  // created, and the kernel is recreated with the restore state file. The
    429  // root container is then signaled to start.
   430  func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
   431  	log.Debugf("containerManager.Restore")
   432  
   433  	var specFile, deviceFile *os.File
   434  	switch numFiles := len(o.Files); numFiles {
   435  	case 2:
   436  		// The device file is donated to the platform.
    437  		// Can't take ownership away from os.File, so dup it to get a new FD.
   438  		fd, err := unix.Dup(int(o.Files[1].Fd()))
   439  		if err != nil {
   440  			return fmt.Errorf("failed to dup file: %v", err)
   441  		}
   442  		deviceFile = os.NewFile(uintptr(fd), "platform device")
   443  		fallthrough
   444  	case 1:
   445  		specFile = o.Files[0]
   446  	case 0:
   447  		return fmt.Errorf("at least one file must be passed to Restore")
   448  	default:
   449  		return fmt.Errorf("at most two files may be passed to Restore")
   450  	}
   451  
   452  	// Pause the kernel while we build a new one.
   453  	cm.l.k.Pause()
   454  
   455  	p, err := createPlatform(cm.l.root.conf, deviceFile)
   456  	if err != nil {
   457  		return fmt.Errorf("creating platform: %v", err)
   458  	}
   459  	k := &kernel.Kernel{
   460  		Platform: p,
   461  	}
   462  	mf, err := createMemoryFile()
   463  	if err != nil {
   464  		return fmt.Errorf("creating memory file: %v", err)
   465  	}
   466  	k.SetMemoryFile(mf)
   467  	networkStack := cm.l.k.RootNetworkNamespace().Stack()
   468  	cm.l.k = k
   469  
   470  	// Set up the restore environment.
   471  	ctx := k.SupervisorContext()
   472  	mntr := newContainerMounter(&cm.l.root, cm.l.k, cm.l.mountHints, cm.l.productName, o.SandboxID)
   473  	ctx, err = mntr.configureRestore(ctx)
   474  	if err != nil {
   475  		return fmt.Errorf("configuring filesystem restore: %v", err)
   476  	}
   477  
   478  	// Prepare to load from the state file.
   479  	if eps, ok := networkStack.(*netstack.Stack); ok {
   480  		stack.StackFromEnv = eps.Stack // FIXME(b/36201077)
   481  	}
   482  	info, err := specFile.Stat()
   483  	if err != nil {
   484  		return err
   485  	}
   486  	if info.Size() == 0 {
   487  		return fmt.Errorf("file cannot be empty")
   488  	}
   489  
   490  	if cm.l.root.conf.ProfileEnable {
    491  		// pprof.Initialize opens /proc/self/maps, so it has to be called before
    492  		// installing seccomp filters.
   493  		pprof.Initialize()
   494  	}
   495  
   496  	// Seccomp filters have to be applied before parsing the state file.
   497  	if err := cm.l.installSeccompFilters(); err != nil {
   498  		return err
   499  	}
   500  
   501  	// Load the state.
   502  	loadOpts := state.LoadOpts{Source: specFile}
   503  	if err := loadOpts.Load(ctx, k, nil, networkStack, time.NewCalibratedClocks(), &vfs.CompleteRestoreOptions{}); err != nil {
   504  		return err
   505  	}
   506  
   507  	// Since we have a new kernel we also must make a new watchdog.
   508  	dogOpts := watchdog.DefaultOpts
   509  	dogOpts.TaskTimeoutAction = cm.l.root.conf.WatchdogAction
   510  	dog := watchdog.New(k, dogOpts)
   511  
   512  	// Change the loader fields to reflect the changes made when restoring.
   513  	cm.l.k = k
   514  	cm.l.watchdog = dog
   515  	cm.l.root.procArgs = kernel.CreateProcessArgs{}
   516  	cm.l.restore = true
   517  
   518  	// Reinitialize the sandbox ID and processes map. Note that it doesn't
   519  	// restore the state of multiple containers, nor exec processes.
   520  	cm.l.sandboxID = o.SandboxID
   521  	cm.l.mu.Lock()
   522  	eid := execID{cid: o.SandboxID}
   523  	cm.l.processes = map[execID]*execProcess{
   524  		eid: {
   525  			tg: cm.l.k.GlobalInit(),
   526  		},
   527  	}
   528  	cm.l.mu.Unlock()
   529  
   530  	// Tell the root container to start and wait for the result.
   531  	cm.startChan <- struct{}{}
   532  	if err := <-cm.startResultChan; err != nil {
   533  		return fmt.Errorf("starting sandbox: %v", err)
   534  	}
   535  
   536  	return nil
   537  }
   538  
   539  // Wait waits for the init process in the given container.
   540  func (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {
   541  	log.Debugf("containerManager.Wait, cid: %s", *cid)
   542  	err := cm.l.waitContainer(*cid, waitStatus)
   543  	log.Debugf("containerManager.Wait returned, cid: %s, waitStatus: %#x, err: %v", *cid, *waitStatus, err)
   544  	return err
   545  }
   546  
   547  // WaitPIDArgs are arguments to the WaitPID method.
   548  type WaitPIDArgs struct {
   549  	// PID is the PID in the container's PID namespace.
   550  	PID int32
   551  
   552  	// CID is the container ID.
   553  	CID string
   554  }
   555  
   556  // WaitPID waits for the process with PID 'pid' in the sandbox.
   557  func (cm *containerManager) WaitPID(args *WaitPIDArgs, waitStatus *uint32) error {
    558  	log.Debugf("containerManager.WaitPID, cid: %s, pid: %d", args.CID, args.PID)
    559  	err := cm.l.waitPID(kernel.ThreadID(args.PID), args.CID, waitStatus)
    560  	log.Debugf("containerManager.WaitPID returned, cid: %s, pid: %d, waitStatus: %#x, err: %v", args.CID, args.PID, *waitStatus, err)
   561  	return err
   562  }
   563  
   564  // SignalDeliveryMode enumerates different signal delivery modes.
   565  type SignalDeliveryMode int
   566  
   567  const (
   568  	// DeliverToProcess delivers the signal to the container process with
   569  	// the specified PID. If PID is 0, then the container init process is
   570  	// signaled.
   571  	DeliverToProcess SignalDeliveryMode = iota
   572  
   573  	// DeliverToAllProcesses delivers the signal to all processes in the
   574  	// container. PID must be 0.
   575  	DeliverToAllProcesses
   576  
   577  	// DeliverToForegroundProcessGroup delivers the signal to the
   578  	// foreground process group in the same TTY session as the specified
   579  	// process. If PID is 0, then the signal is delivered to the foreground
   580  	// process group for the TTY for the init process.
   581  	DeliverToForegroundProcessGroup
   582  )
   583  
   584  func (s SignalDeliveryMode) String() string {
   585  	switch s {
   586  	case DeliverToProcess:
   587  		return "Process"
   588  	case DeliverToAllProcesses:
   589  		return "All"
   590  	case DeliverToForegroundProcessGroup:
   591  		return "Foreground Process Group"
   592  	}
   593  	return fmt.Sprintf("unknown signal delivery mode: %d", s)
   594  }
   595  
   596  // SignalArgs are arguments to the Signal method.
   597  type SignalArgs struct {
   598  	// CID is the container ID.
   599  	CID string
   600  
   601  	// Signo is the signal to send to the process.
   602  	Signo int32
   603  
   604  	// PID is the process ID in the given container that will be signaled,
   605  	// relative to the root PID namespace, not the container's.
   606  	// If 0, the root container will be signalled.
   607  	PID int32
   608  
   609  	// Mode is the signal delivery mode.
   610  	Mode SignalDeliveryMode
   611  }
   612  
   613  // Signal sends a signal to one or more processes in a container. If args.PID
   614  // is 0, then the container init process is used. Depending on the
    615  // args.Mode option, the signal may be sent directly to the
   616  // indicated process, to all processes in the container, or to the foreground
   617  // process group.
   618  func (cm *containerManager) Signal(args *SignalArgs, _ *struct{}) error {
   619  	log.Debugf("containerManager.Signal: cid: %s, PID: %d, signal: %d, mode: %v", args.CID, args.PID, args.Signo, args.Mode)
   620  	return cm.l.signal(args.CID, args.PID, args.Signo, args.Mode)
   621  }
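
         // A brief usage sketch (client side, hedged as before): deliver SIGTERM to
         // every process in a container by leaving PID as 0 and selecting
         // DeliverToAllProcesses.
         //
         //	args := SignalArgs{
         //		CID:   cid,
         //		Signo: int32(unix.SIGTERM),
         //		Mode:  DeliverToAllProcesses,
         //	}
         //	if err := conn.Call(ContMgrSignal, &args, nil); err != nil {
         //		return err
         //	}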
   622  
   623  // CreateTraceSessionArgs are arguments to the CreateTraceSession method.
   624  type CreateTraceSessionArgs struct {
   625  	Config seccheck.SessionConfig
   626  	Force  bool
   627  	urpc.FilePayload
   628  }
   629  
   630  // CreateTraceSession creates a new trace session.
   631  func (cm *containerManager) CreateTraceSession(args *CreateTraceSessionArgs, _ *struct{}) error {
   632  	log.Debugf("containerManager.CreateTraceSession: config: %+v", args.Config)
   633  	for i, sinkFile := range args.Files {
   634  		if sinkFile != nil {
   635  			fd, err := fd.NewFromFile(sinkFile)
   636  			if err != nil {
   637  				return err
   638  			}
   639  			args.Config.Sinks[i].FD = fd
   640  		}
   641  	}
   642  	return seccheck.Create(&args.Config, args.Force)
   643  }
   644  
   645  // DeleteTraceSession deletes an existing trace session.
   646  func (cm *containerManager) DeleteTraceSession(name *string, _ *struct{}) error {
   647  	log.Debugf("containerManager.DeleteTraceSession: name: %q", *name)
   648  	return seccheck.Delete(*name)
   649  }
   650  
   651  // ListTraceSessions lists trace sessions.
   652  func (cm *containerManager) ListTraceSessions(_ *struct{}, out *[]seccheck.SessionConfig) error {
   653  	log.Debugf("containerManager.ListTraceSessions")
   654  	seccheck.List(out)
   655  	return nil
   656  }
   657  
   658  // ProcfsDump dumps procfs state of the sandbox.
   659  func (cm *containerManager) ProcfsDump(_ *struct{}, out *[]procfs.ProcessProcfsDump) error {
   660  	log.Debugf("containerManager.ProcfsDump")
   661  	ts := cm.l.k.TaskSet()
   662  	pidns := ts.Root
   663  	*out = make([]procfs.ProcessProcfsDump, 0, len(cm.l.processes))
   664  	for _, tg := range pidns.ThreadGroups() {
   665  		pid := pidns.IDOfThreadGroup(tg)
   666  		procDump, err := procfs.Dump(tg.Leader(), pid, pidns)
   667  		if err != nil {
    668  			log.Warningf("skipping procfs dump for PID %d: %v", pid, err)
   669  			continue
   670  		}
   671  		*out = append(*out, procDump)
   672  	}
   673  	return nil
   674  }