gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/runsc/container/multi_container_test.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package container
    16  
    17  import (
    18  	"fmt"
    19  	"io/ioutil"
    20  	"math"
    21  	"os"
    22  	"path"
    23  	"path/filepath"
    24  	"reflect"
    25  	"strings"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/cenkalti/backoff"
    30  	specs "github.com/opencontainers/runtime-spec/specs-go"
    31  	"golang.org/x/sys/unix"
    32  	"gvisor.dev/gvisor/pkg/cleanup"
    33  	"gvisor.dev/gvisor/pkg/sentry/control"
    34  	"gvisor.dev/gvisor/pkg/sentry/kernel"
    35  	"gvisor.dev/gvisor/pkg/sentry/pgalloc"
    36  	"gvisor.dev/gvisor/pkg/state/statefile"
    37  	"gvisor.dev/gvisor/pkg/sync"
    38  	"gvisor.dev/gvisor/pkg/test/testutil"
    39  	"gvisor.dev/gvisor/runsc/boot"
    40  	"gvisor.dev/gvisor/runsc/config"
    41  	"gvisor.dev/gvisor/runsc/specutils"
    42  )
    43  
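        // sleepCmd is a command that sleeps long enough to outlive the tests.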
    44  var sleepCmd = []string{"/bin/sleep", "1000"}
    45  
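        // createSpecs returns a spec for each command. The first spec is annotated as
        // the sandbox (root) container and the others as subcontainers of it. It also
        // returns the generated container IDs.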
    46  func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {
    47  	var specs []*specs.Spec
    48  	var ids []string
    49  	rootID := testutil.RandomContainerID()
    50  
    51  	for i, cmd := range cmds {
    52  		spec := testutil.NewSpecWithArgs(cmd...)
    53  		if i == 0 {
    54  			spec.Annotations = map[string]string{
    55  				specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
    56  			}
    57  			ids = append(ids, rootID)
    58  		} else {
    59  			spec.Annotations = map[string]string{
    60  				specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
    61  				specutils.ContainerdSandboxIDAnnotation:     rootID,
    62  			}
    63  			ids = append(ids, testutil.RandomContainerID())
    64  		}
    65  		specs = append(specs, spec)
    66  	}
    67  	return specs, ids
    68  }
    69  
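        // startContainers creates and starts one container per spec inside the same
        // sandbox. It returns the containers and a cleanup function that destroys them.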
    70  func startContainers(conf *config.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {
    71  	if len(conf.RootDir) == 0 {
    72  		panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.")
    73  	}
    74  
    75  	cu := cleanup.Cleanup{}
    76  	defer cu.Clean()
    77  
    78  	var containers []*Container
    79  	for i, spec := range specs {
    80  		bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
    81  		if err != nil {
    82  			return nil, nil, fmt.Errorf("error setting up container: %v", err)
    83  		}
    84  		cu.Add(cleanup)
    85  
    86  		args := Args{
    87  			ID:        ids[i],
    88  			Spec:      spec,
    89  			BundleDir: bundleDir,
    90  		}
    91  		cont, err := New(conf, args)
    92  		if err != nil {
    93  			return nil, nil, fmt.Errorf("error creating container: %v", err)
    94  		}
    95  		cu.Add(func() { cont.Destroy() })
    96  		containers = append(containers, cont)
    97  
    98  		if err := cont.Start(conf); err != nil {
    99  			return nil, nil, fmt.Errorf("error starting container: %v", err)
   100  		}
   101  	}
   102  
   103  	return containers, cu.Release(), nil
   104  }
   105  
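        // restoreContainers is like startContainers, but restores each container from
        // the checkpoint image at imagePath instead of starting it from scratch.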
   106  func restoreContainers(conf *config.Config, specs []*specs.Spec, ids []string, imagePath string) ([]*Container, func(), error) {
   107  	if len(conf.RootDir) == 0 {
   108  		panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.")
   109  	}
   110  
   111  	cu := cleanup.Cleanup{}
   112  	defer cu.Clean()
   113  
   114  	var containers []*Container
   115  	for i, spec := range specs {
   116  		bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
   117  		if err != nil {
   118  			return nil, nil, fmt.Errorf("error setting up container: %v", err)
   119  		}
   120  		cu.Add(cleanup)
   121  
   122  		args := Args{
   123  			ID:        ids[i],
   124  			Spec:      spec,
   125  			BundleDir: bundleDir,
   126  		}
   127  		cont, err := New(conf, args)
   128  		if err != nil {
   129  			return nil, nil, fmt.Errorf("error creating container: %v", err)
   130  		}
   131  		cu.Add(func() { cont.Destroy() })
   132  		containers = append(containers, cont)
   133  
   134  		if err := cont.Restore(conf, imagePath, false /* direct */); err != nil {
   135  			return nil, nil, fmt.Errorf("error restoring container: %v", err)
   136  		}
   137  
   138  		time.Sleep(100 * time.Millisecond)
   139  	}
   140  
   141  	return containers, cu.Release(), nil
   142  }
   143  
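        // execDesc describes a command to run inside a container, along with the
        // expected exit status and, optionally, a substring of an expected error.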
   144  type execDesc struct {
   145  	c    *Container
   146  	cmd  []string
   147  	name string
   148  	want int
   149  	err  string
   150  }
   151  
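        // execMany runs each execDesc as a subtest and checks that it returns the
        // expected exit status (or the expected error, when one is set).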
   152  func execMany(t *testing.T, conf *config.Config, execs []execDesc) {
   153  	for _, exec := range execs {
   154  		t.Run(exec.name, func(t *testing.T) {
   155  			args := &control.ExecArgs{Argv: exec.cmd}
   156  			if ws, err := exec.c.executeSync(conf, args); err != nil {
   157  				if len(exec.err) == 0 || !strings.Contains(err.Error(), exec.err) {
   158  					t.Errorf("error executing %+v: %v", args, err)
   159  				}
   160  			} else if len(exec.err) > 0 {
   161  				t.Errorf("exec %q didn't fail as expected", exec.cmd)
   162  			} else if ws.ExitStatus() != exec.want {
   163  				t.Errorf("exec %q got exit status: %d, want: %d", exec.cmd, ws.ExitStatus(), exec.want)
   164  			}
   165  		})
   166  	}
   167  }
   168  
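        // createSharedMount annotates every spec in the pod with a shared mount named
        // 'name'. The mount is shared at pod scope when more than one container mounts
        // the same source, and at container scope otherwise.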
   169  func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {
   170  	numContainers := 0
   171  	for _, spec := range pod {
   172  		for i := range spec.Mounts {
   173  			if spec.Mounts[i].Source == mount.Source {
   174  				numContainers++
   175  				break
   176  			}
   177  		}
   178  	}
   179  	share := "container"
   180  	if numContainers > 1 {
   181  		share = "pod"
   182  	}
   183  	for _, spec := range pod {
   184  		spec.Annotations[boot.MountPrefix+name+".source"] = mount.Source
   185  		spec.Annotations[boot.MountPrefix+name+".type"] = "tmpfs"
   186  		spec.Annotations[boot.MountPrefix+name+".share"] = share
   187  		if len(mount.Options) > 0 {
   188  			spec.Annotations[boot.MountPrefix+name+".options"] = strings.Join(mount.Options, ",")
   189  		}
   190  	}
   191  }
   192  
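        // testSharedMount runs tester for both tmpfs and bind shared mounts, combined
        // with several --overlay2 configurations, using a fresh root directory and
        // mount source directory each time.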
   193  func testSharedMount(t *testing.T, tester func(t *testing.T, conf *config.Config, sourceDir string, mntType string)) {
   194  	// Shared mounts can be tmpfs or bind types. Test both.
   195  	for _, mntType := range []string{"tmpfs", "bind"} {
   196  		t.Run(mntType, func(t *testing.T) {
   197  			// We are interested in seeing how shared mounts interact with --overlay2.
   198  			for _, ovl := range []string{"none", "root:self", "all:memory"} {
   199  				t.Run(ovl, func(t *testing.T) {
   200  					conf := testutil.TestConfig(t)
   201  					conf.Overlay2.Set(ovl)
   202  					rootDir, cleanup, err := testutil.SetupRootDir()
   203  					if err != nil {
   204  						t.Fatalf("error creating root dir: %v", err)
   205  					}
   206  					defer cleanup()
   207  					conf.RootDir = rootDir
   208  
   209  					sourceDir, err := ioutil.TempDir(testutil.TmpDir(), "mntSrc")
   210  					if err != nil {
   211  						t.Fatalf("ioutil.TempDir() failed: %v", err)
   212  					}
   213  					defer os.RemoveAll(sourceDir)
   214  
   215  					tester(t, conf, sourceDir, mntType)
   216  				})
   217  			}
   218  		})
   219  	}
   220  }
   221  
   222  // TestMultiContainerSanity checks that it is possible to run 2 dead-simple
   223  // containers in the same sandbox.
   224  func TestMultiContainerSanity(t *testing.T) {
   225  	for name, conf := range configs(t, false /* noOverlay */) {
   226  		t.Run(name, func(t *testing.T) {
   227  			rootDir, cleanup, err := testutil.SetupRootDir()
   228  			if err != nil {
   229  				t.Fatalf("error creating root dir: %v", err)
   230  			}
   231  			defer cleanup()
   232  			conf.RootDir = rootDir
   233  
   234  			// Set up the containers.
   235  			specs, ids := createSpecs(sleepCmd, sleepCmd)
   236  			containers, cleanup, err := startContainers(conf, specs, ids)
   237  			if err != nil {
   238  				t.Fatalf("error starting containers: %v", err)
   239  			}
   240  			defer cleanup()
   241  
   242  			// Check via ps that multiple processes are running.
   243  			expectedPL := []*control.Process{
   244  				newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
   245  			}
   246  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   247  				t.Errorf("failed to wait for sleep to start: %v", err)
   248  			}
   249  			expectedPL = []*control.Process{
   250  				newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
   251  			}
   252  			if err := waitForProcessList(containers[1], expectedPL); err != nil {
   253  				t.Errorf("failed to wait for sleep to start: %v", err)
   254  			}
   255  		})
   256  	}
   257  }
   258  
   259  // TestMultiPIDNS checks that it is possible to run 2 dead-simple containers in
   260  // the same sandbox with different pidns.
   261  func TestMultiPIDNS(t *testing.T) {
   262  	for name, conf := range configs(t, true /* noOverlay */) {
   263  		t.Run(name, func(t *testing.T) {
   264  			rootDir, cleanup, err := testutil.SetupRootDir()
   265  			if err != nil {
   266  				t.Fatalf("error creating root dir: %v", err)
   267  			}
   268  			defer cleanup()
   269  			conf.RootDir = rootDir
   270  
   271  			// Set up the containers.
   272  			testSpecs, ids := createSpecs(sleepCmd, sleepCmd)
   273  			testSpecs[1].Linux = &specs.Linux{
   274  				Namespaces: []specs.LinuxNamespace{
   275  					{
   276  						Type: "pid",
   277  					},
   278  				},
   279  			}
   280  
   281  			containers, cleanup, err := startContainers(conf, testSpecs, ids)
   282  			if err != nil {
   283  				t.Fatalf("error starting containers: %v", err)
   284  			}
   285  			defer cleanup()
   286  
   287  			// Check via ps that multiple processes are running.
   288  			expectedPL := []*control.Process{
   289  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   290  			}
   291  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   292  				t.Errorf("failed to wait for sleep to start: %v", err)
   293  			}
   294  			expectedPL = []*control.Process{
   295  				newProcessBuilder().PID(2).Cmd("sleep").Process(),
   296  			}
   297  			if err := waitForProcessList(containers[1], expectedPL); err != nil {
   298  				t.Errorf("failed to wait for sleep to start: %v", err)
   299  			}
   300  
   301  			// Root container runs in the root PID namespace and can see all
   302  			// processes.
   303  			expectedPL = []*control.Process{
   304  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   305  				newProcessBuilder().PID(2).Cmd("sleep").Process(),
   306  				newProcessBuilder().Cmd("ps").Process(),
   307  			}
   308  			got, err := execPS(conf, containers[0])
   309  			if err != nil {
   310  				t.Fatal(err)
   311  			}
   312  			if !procListsEqual(got, expectedPL) {
   313  				t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   314  			}
   315  
   316  			expectedPL = []*control.Process{
   317  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   318  				newProcessBuilder().Cmd("ps").Process(),
   319  			}
   320  			got, err = execPS(conf, containers[1])
   321  			if err != nil {
   322  				t.Fatal(err)
   323  			}
   324  			if !procListsEqual(got, expectedPL) {
   325  				t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   326  			}
   327  		})
   328  	}
   329  }
   330  
   331  // TestMultiPIDNSPath checks the pidns path.
   332  func TestMultiPIDNSPath(t *testing.T) {
   333  	for name, conf := range configs(t, true /* noOverlay */) {
   334  		t.Run(name, func(t *testing.T) {
   335  			rootDir, cleanup, err := testutil.SetupRootDir()
   336  			if err != nil {
   337  				t.Fatalf("error creating root dir: %v", err)
   338  			}
   339  			defer cleanup()
   340  			conf.RootDir = rootDir
   341  
   342  			// Set up the containers.
   343  			testSpecs, ids := createSpecs(sleepCmd, sleepCmd, sleepCmd)
   344  			testSpecs[0].Linux = &specs.Linux{
   345  				Namespaces: []specs.LinuxNamespace{
   346  					{
   347  						Type: "pid",
   348  						Path: "/proc/1/ns/pid",
   349  					},
   350  				},
   351  			}
   352  			testSpecs[1].Linux = &specs.Linux{
   353  				Namespaces: []specs.LinuxNamespace{
   354  					{
   355  						Type: "pid",
   356  						Path: "/proc/1/ns/pid",
   357  					},
   358  				},
   359  			}
   360  			testSpecs[2].Linux = &specs.Linux{
   361  				Namespaces: []specs.LinuxNamespace{
   362  					{
   363  						Type: "pid",
   364  						Path: "/proc/2/ns/pid",
   365  					},
   366  				},
   367  			}
   368  
   369  			containers, cleanup, err := startContainers(conf, testSpecs, ids)
   370  			if err != nil {
   371  				t.Fatalf("error starting containers: %v", err)
   372  			}
   373  			defer cleanup()
   374  
   375  			// Check via ps that multiple processes are running.
   376  			expectedPL := []*control.Process{
   377  				newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
   378  			}
   379  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   380  				t.Errorf("failed to wait for sleep to start: %v", err)
   381  			}
   382  			expectedPL = []*control.Process{
   383  				newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
   384  			}
   385  			if err := waitForProcessList(containers[1], expectedPL); err != nil {
   386  				t.Errorf("failed to wait for sleep to start: %v", err)
   387  			}
   388  			expectedPL = []*control.Process{
   389  				newProcessBuilder().PID(3).PPID(0).Cmd("sleep").Process(),
   390  			}
   391  			if err := waitForProcessList(containers[2], expectedPL); err != nil {
   392  				t.Errorf("failed to wait for sleep to start: %v", err)
   393  			}
   394  
   395  			// Root container runs in the root PID namespace and can see all
   396  			// processes.
   397  			expectedPL = []*control.Process{
   398  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   399  				newProcessBuilder().PID(2).Cmd("sleep").Process(),
   400  				newProcessBuilder().PID(3).Cmd("sleep").Process(),
   401  				newProcessBuilder().Cmd("ps").Process(),
   402  			}
   403  			got, err := execPS(conf, containers[0])
   404  			if err != nil {
   405  				t.Fatal(err)
   406  			}
   407  			if !procListsEqual(got, expectedPL) {
   408  				t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   409  			}
   410  
   411  			// Container 1 runs in the same PID namespace as the root container.
   412  			expectedPL = []*control.Process{
   413  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   414  				newProcessBuilder().PID(2).Cmd("sleep").Process(),
   415  				newProcessBuilder().PID(3).Cmd("sleep").Process(),
   416  				newProcessBuilder().Cmd("ps").Process(),
   417  			}
   418  			got, err = execPS(conf, containers[1])
   419  			if err != nil {
   420  				t.Fatal(err)
   421  			}
   422  			if !procListsEqual(got, expectedPL) {
   423  				t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   424  			}
   425  
   426  			// Container 2 runs in its own PID namespace.
   427  			expectedPL = []*control.Process{
   428  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   429  				newProcessBuilder().Cmd("ps").Process(),
   430  			}
   431  			got, err = execPS(conf, containers[2])
   432  			if err != nil {
   433  				t.Fatal(err)
   434  			}
   435  			if !procListsEqual(got, expectedPL) {
   436  				t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   437  			}
   438  		})
   439  	}
   440  }
   441  
   442  // TestMultiPIDNSKill kills processes by PID when containers use different PID
   443  // namespaces, to ensure that the PID is resolved in the root namespace.
   444  func TestMultiPIDNSKill(t *testing.T) {
   445  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
   446  	if err != nil {
   447  		t.Fatal("error finding test_app:", err)
   448  	}
   449  
   450  	for name, conf := range configs(t, true /* noOverlay */) {
   451  		t.Run(name, func(t *testing.T) {
   452  			rootDir, cleanup, err := testutil.SetupRootDir()
   453  			if err != nil {
   454  				t.Fatalf("error creating root dir: %v", err)
   455  			}
   456  			defer cleanup()
   457  			conf.RootDir = rootDir
   458  
   459  			// Set up the containers.
   460  			cmd := []string{app, "task-tree", "--depth=1", "--width=2", "--pause=true"}
   461  			const processes = 3
   462  			testSpecs, ids := createSpecs(cmd, cmd)
   463  
   464  			testSpecs[1].Linux = &specs.Linux{
   465  				Namespaces: []specs.LinuxNamespace{
   466  					{
   467  						Type: "pid",
   468  					},
   469  				},
   470  			}
   471  
   472  			containers, cleanup, err := startContainers(conf, testSpecs, ids)
   473  			if err != nil {
   474  				t.Fatalf("error starting containers: %v", err)
   475  			}
   476  			defer cleanup()
   477  
   478  			// Wait until all processes are created.
   479  			for _, c := range containers {
   480  				if err := waitForProcessCount(c, processes); err != nil {
   481  					t.Fatalf("error waiting for processes: %v", err)
   482  				}
   483  			}
   484  
   485  			for i, c := range containers {
   486  				// First, kill a process that belongs to the container.
   487  				procs, err := c.Processes()
   488  				if err != nil {
   489  					t.Fatalf("container.Processes(): %v", err)
   490  				}
   491  				t.Logf("Container %q procs: %s", c.ID, procListToString(procs))
   492  				pidToKill := procs[processes-1].PID
   493  				t.Logf("PID to kill: %d", pidToKill)
   494  				if err := c.SignalProcess(unix.SIGKILL, int32(pidToKill)); err != nil {
   495  					t.Errorf("container.SignalProcess: %v", err)
   496  				}
   497  				// Wait for the process to get killed.
   498  				if err := waitForProcessCount(c, processes-1); err != nil {
   499  					t.Fatalf("error waiting for processes: %v", err)
   500  				}
   501  				procs, err = c.Processes()
   502  				if err != nil {
   503  					t.Fatalf("container.Processes(): %v", err)
   504  				}
   505  				t.Logf("Container %q procs after kill: %s", c.ID, procListToString(procs))
   506  				for _, proc := range procs {
   507  					if proc.PID == pidToKill {
   508  						t.Errorf("process %d not killed: %+v", pidToKill, proc)
   509  					}
   510  				}
   511  
   512  				// Next, attempt to kill a process from another container and check that
   513  				// it fails.
   514  				other := containers[(i+1)%len(containers)]
   515  				procs, err = other.Processes()
   516  				if err != nil {
   517  					t.Fatalf("container.Processes(): %v", err)
   518  				}
   519  				t.Logf("Other container %q procs: %s", other.ID, procListToString(procs))
   520  
   521  				pidToKill = procs[len(procs)-1].PID
   522  				t.Logf("PID that should not be killed: %d", pidToKill)
   523  				err = c.SignalProcess(unix.SIGKILL, int32(pidToKill))
   524  				if err == nil {
   525  					t.Fatalf("killing another container's process should fail")
   526  				}
   527  				if !strings.Contains(err.Error(), "belongs to a different container") {
   528  					t.Errorf("wrong error message when killing another container's process: %v", err)
   529  				}
   530  			}
   531  		})
   532  	}
   533  }
   534  
   535  // TestMultiPIDNSRoot checks that the sandbox PID namespace can be used to
   536  // reference the root PID namespace of the sandbox.
   537  func TestMultiPIDNSRoot(t *testing.T) {
   538  	for name, conf := range configs(t, true /* noOverlay */) {
   539  		t.Run(name, func(t *testing.T) {
   540  			rootDir, cleanup, err := testutil.SetupRootDir()
   541  			if err != nil {
   542  				t.Fatalf("error creating root dir: %v", err)
   543  			}
   544  			defer cleanup()
   545  			conf.RootDir = rootDir
   546  
   547  			// Set up the containers: one in the root PID namespace and another in a
   548  			// sub-namespace.
   549  			testSpecs, ids := createSpecs(sleepCmd, sleepCmd, sleepCmd)
   550  			testSpecs[1].Linux = &specs.Linux{
   551  				Namespaces: []specs.LinuxNamespace{
   552  					{
   553  						Type: "pid",
   554  						Path: "/proc/1/ns/pid",
   555  					},
   556  				},
   557  			}
   558  
   559  			// Start 2 containers first, and use the 3rd to join the sandbox pidns.
   560  			delayedSpec, delayedID := testSpecs[2], ids[2]
   561  			testSpecs = testSpecs[:2]
   562  			ids = ids[:2]
   563  
   564  			containers, cleanup, err := startContainers(conf, testSpecs, ids)
   565  			if err != nil {
   566  				t.Fatalf("error starting containers: %v", err)
   567  			}
   568  			defer cleanup()
   569  
   570  			delayedSpec.Linux = &specs.Linux{
   571  				Namespaces: []specs.LinuxNamespace{
   572  					{
   573  						Type: "pid",
   574  						Path: fmt.Sprintf("/proc/%d/ns/pid", containers[0].SandboxPid()),
   575  					},
   576  				},
   577  			}
   578  			delayed, cleanup, err := startContainers(conf, []*specs.Spec{delayedSpec}, []string{delayedID})
   579  			if err != nil {
   580  				t.Fatalf("error starting sub-container: %v", err)
   581  			}
   582  			defer cleanup()
   583  
   584  			// Wait for all container processes to be up and running.
   585  			expectedPL := []*control.Process{
   586  				newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
   587  			}
   588  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   589  				t.Errorf("failed to wait for sleep to start: %v", err)
   590  			}
   591  			expectedPL = []*control.Process{
   592  				newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
   593  			}
   594  			if err := waitForProcessList(containers[1], expectedPL); err != nil {
   595  				t.Fatalf("failed to wait for sleep to start: %v", err)
   596  			}
   597  			expectedPL = []*control.Process{
   598  				newProcessBuilder().PID(3).PPID(0).Cmd("sleep").Process(),
   599  			}
   600  			if err := waitForProcessList(delayed[0], expectedPL); err != nil {
   601  				t.Fatalf("failed to wait for sleep to start: %v", err)
   602  			}
   603  
   604  			// Check that the delayed container is running in the root PID namespace
   605  			// and can see all other processes.
   606  			expectedPL = []*control.Process{
   607  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   608  				newProcessBuilder().PID(2).Cmd("sleep").Process(),
   609  				newProcessBuilder().PID(3).Cmd("sleep").Process(),
   610  				newProcessBuilder().Cmd("ps").Process(),
   611  			}
   612  			if got, err := execPS(conf, delayed[0]); err != nil {
   613  				t.Fatal(err)
   614  			} else if !procListsEqual(got, expectedPL) {
   615  				t.Fatalf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
   616  			}
   617  		})
   618  	}
   619  }
   620  
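        // TestMultiContainerWait checks that a subcontainer can be waited on and that
        // the root container keeps running after the subcontainer exits.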
   621  func TestMultiContainerWait(t *testing.T) {
   622  	rootDir, cleanup, err := testutil.SetupRootDir()
   623  	if err != nil {
   624  		t.Fatalf("error creating root dir: %v", err)
   625  	}
   626  	defer cleanup()
   627  
   628  	conf := testutil.TestConfig(t)
   629  	conf.RootDir = rootDir
   630  
   631  	// The first container should run the entire duration of the test.
   632  	// We'll wait on the second container, which is much shorter lived.
   633  	cmd2 := []string{"sleep", "1"}
   634  	specs, ids := createSpecs(sleepCmd, cmd2)
   635  
   636  	containers, cleanup, err := startContainers(conf, specs, ids)
   637  	if err != nil {
   638  		t.Fatalf("error starting containers: %v", err)
   639  	}
   640  	defer cleanup()
   641  
   642  	// Check that we can wait for the sub-container.
   643  	c := containers[1]
   644  	if ws, err := c.Wait(); err != nil {
   645  		t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
   646  	} else if es := ws.ExitStatus(); es != 0 {
   647  		t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
   648  	}
   649  	if _, err := c.Wait(); err != nil {
   650  		t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err)
   651  	}
   652  
   653  	// After Wait returns, ensure that the root container is running and
   654  	// the child has finished.
   655  	expectedPL := []*control.Process{
   656  		newProcessBuilder().Cmd("sleep").PID(1).Process(),
   657  	}
   658  	if err := waitForProcessList(containers[0], expectedPL); err != nil {
   659  		t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err)
   660  	}
   661  }
   662  
   663  // TestExecWait ensures that we can wait on containers and individual processes
   664  // in the sandbox that have already exited.
   665  func TestExecWait(t *testing.T) {
   666  	rootDir, cleanup, err := testutil.SetupRootDir()
   667  	if err != nil {
   668  		t.Fatalf("error creating root dir: %v", err)
   669  	}
   670  	defer cleanup()
   671  
   672  	conf := testutil.TestConfig(t)
   673  	conf.RootDir = rootDir
   674  
   675  	// The first container should run the entire duration of the test.
   676  	// We'll wait on the second container, which is much shorter lived.
   677  	cmd2 := []string{"sleep", "1"}
   678  	specs, ids := createSpecs(sleepCmd, cmd2)
   679  	containers, cleanup, err := startContainers(conf, specs, ids)
   680  	if err != nil {
   681  		t.Fatalf("error starting containers: %v", err)
   682  	}
   683  	defer cleanup()
   684  
   685  	// Check via ps that process is running.
   686  	expectedPL := []*control.Process{
   687  		newProcessBuilder().Cmd("sleep").Process(),
   688  	}
   689  	if err := waitForProcessList(containers[1], expectedPL); err != nil {
   690  		t.Fatalf("failed to wait for sleep to start: %v", err)
   691  	}
   692  
   693  	// Wait for the second container to finish.
   694  	if err := waitForProcessCount(containers[1], 0); err != nil {
   695  		t.Fatalf("failed to wait for second container to stop: %v", err)
   696  	}
   697  
   698  	// Get the second container exit status.
   699  	if ws, err := containers[1].Wait(); err != nil {
   700  		t.Fatalf("failed to wait for process %s: %v", containers[1].Spec.Process.Args, err)
   701  	} else if es := ws.ExitStatus(); es != 0 {
   702  		t.Fatalf("process %s exited with non-zero status %d", containers[1].Spec.Process.Args, es)
   703  	}
   704  	if _, err := containers[1].Wait(); err != nil {
   705  		t.Fatalf("wait for stopped container %s shouldn't fail: %v", containers[1].Spec.Process.Args, err)
   706  	}
   707  
   708  	// Execute another process in the first container.
   709  	args := &control.ExecArgs{
   710  		Filename:         "/bin/sleep",
   711  		Argv:             []string{"/bin/sleep", "1"},
   712  		WorkingDirectory: "/",
   713  		KUID:             0,
   714  	}
   715  	pid, err := containers[0].Execute(conf, args)
   716  	if err != nil {
   717  		t.Fatalf("error executing: %v", err)
   718  	}
   719  
   720  	// Wait for the exec'd process to exit.
   721  	expectedPL = []*control.Process{
   722  		newProcessBuilder().PID(1).Cmd("sleep").Process(),
   723  	}
   724  	if err := waitForProcessList(containers[0], expectedPL); err != nil {
   725  		t.Fatalf("failed to wait for the exec'd process to exit: %v", err)
   726  	}
   727  
   728  	// Get the exit status from the exec'd process.
   729  	if ws, err := containers[0].WaitPID(pid); err != nil {
   730  		t.Fatalf("failed to wait for process %+v with pid %d: %v", args, pid, err)
   731  	} else if es := ws.ExitStatus(); es != 0 {
   732  		t.Fatalf("process %+v exited with non-zero status %d", args, es)
   733  	}
   734  	if _, err := containers[0].WaitPID(pid); err == nil {
   735  		t.Fatalf("wait for stopped process %+v should fail", args)
   736  	}
   737  }
   738  
   739  // TestMultiContainerMount tests that bind mounts can be used with multiple
   740  // containers.
   741  func TestMultiContainerMount(t *testing.T) {
   742  	// 'src != dst' ensures that 'dst' doesn't exist in the host and must be
   743  	// properly mapped inside the container to work.
   744  	src, err := ioutil.TempDir(testutil.TmpDir(), "container")
   745  	if err != nil {
   746  		t.Fatal("ioutil.TempDir failed:", err)
   747  	}
   748  	dst := src + ".dst"
   749  	cmd2 := []string{"touch", filepath.Join(dst, "file")}
   750  
   751  	sps, ids := createSpecs(sleepCmd, cmd2)
   752  	sps[1].Mounts = append(sps[1].Mounts, specs.Mount{
   753  		Source:      src,
   754  		Destination: dst,
   755  		Type:        "bind",
   756  	})
   757  
   758  	// Set up the containers.
   759  	rootDir, cleanup, err := testutil.SetupRootDir()
   760  	if err != nil {
   761  		t.Fatalf("error creating root dir: %v", err)
   762  	}
   763  	defer cleanup()
   764  
   765  	conf := testutil.TestConfig(t)
   766  	conf.RootDir = rootDir
   767  
   768  	containers, cleanup, err := startContainers(conf, sps, ids)
   769  	if err != nil {
   770  		t.Fatalf("error starting containers: %v", err)
   771  	}
   772  	defer cleanup()
   773  
   774  	ws, err := containers[1].Wait()
   775  	if err != nil {
   776  		t.Error("error waiting on container:", err)
   777  	}
   778  	if !ws.Exited() || ws.ExitStatus() != 0 {
   779  		t.Error("container failed, waitStatus:", ws)
   780  	}
   781  }
   782  
   783  // TestMultiContainerSignal checks that it is possible to signal individual
   784  // containers without killing the entire sandbox.
   785  func TestMultiContainerSignal(t *testing.T) {
   786  	for name, conf := range configs(t, false /* noOverlay */) {
   787  		t.Run(name, func(t *testing.T) {
   788  			rootDir, cleanup, err := testutil.SetupRootDir()
   789  			if err != nil {
   790  				t.Fatalf("error creating root dir: %v", err)
   791  			}
   792  			defer cleanup()
   793  			conf.RootDir = rootDir
   794  
   795  			// Set up the containers.
   796  			specs, ids := createSpecs(sleepCmd, sleepCmd)
   797  			containers, cleanup, err := startContainers(conf, specs, ids)
   798  			if err != nil {
   799  				t.Fatalf("error starting containers: %v", err)
   800  			}
   801  			defer cleanup()
   802  
   803  			// Check via ps that container 1 process is running.
   804  			expectedPL := []*control.Process{
   805  				newProcessBuilder().Cmd("sleep").Process(),
   806  			}
   807  			if err := waitForProcessList(containers[1], expectedPL); err != nil {
   808  				t.Errorf("failed to wait for sleep to start: %v", err)
   809  			}
   810  
   811  			// Kill process 2.
   812  			if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
   813  				t.Errorf("failed to kill process 2: %v", err)
   814  			}
   815  
   816  			// Make sure process 1 is still running.
   817  			expectedPL = []*control.Process{
   818  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   819  			}
   820  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   821  				t.Errorf("failed to wait for sleep to start: %v", err)
   822  			}
   823  
   824  			// goferPid is reset when container is destroyed.
   825  			goferPid := containers[1].GoferPid
   826  
   827  			// Destroy container and ensure container's gofer process has exited.
   828  			if err := containers[1].Destroy(); err != nil {
   829  				t.Errorf("failed to destroy container: %v", err)
   830  			}
   831  			_, _, err = specutils.RetryEintr(func() (uintptr, uintptr, error) {
   832  				cpid, err := unix.Wait4(goferPid, nil, 0, nil)
   833  				return uintptr(cpid), 0, err
   834  			})
   835  			if err != unix.ECHILD {
   836  				t.Errorf("error waiting for gofer to exit: %v", err)
   837  			}
   838  			// Make sure process 1 is still running.
   839  			if err := waitForProcessList(containers[0], expectedPL); err != nil {
   840  				t.Errorf("failed to wait for sleep to start: %v", err)
   841  			}
   842  
   843  			// Now that process 2 is gone, ensure we get an error trying to
   844  			// signal it again.
   845  			if err := containers[1].SignalContainer(unix.SIGKILL, false); err == nil {
   846  				t.Errorf("container %q shouldn't exist, but we were able to signal it", containers[1].ID)
   847  			}
   848  
   849  			// Kill process 1.
   850  			if err := containers[0].SignalContainer(unix.SIGKILL, false); err != nil {
   851  				t.Errorf("failed to kill process 1: %v", err)
   852  			}
   853  
   854  			// Ensure that container's gofer and sandbox process are no more.
   855  			err = blockUntilWaitable(containers[0].GoferPid)
   856  			if err != nil && err != unix.ECHILD {
   857  				t.Errorf("error waiting for gofer to exit: %v", err)
   858  			}
   859  
   860  			err = blockUntilWaitable(containers[0].Sandbox.Getpid())
   861  			if err != nil && err != unix.ECHILD {
   862  				t.Errorf("error waiting for sandbox to exit: %v", err)
   863  			}
   864  
   865  			// The sentry should be gone, so signaling should yield an error.
   866  			if err := containers[0].SignalContainer(unix.SIGKILL, false); err == nil {
   867  				t.Errorf("sandbox %q shouldn't exist, but we were able to signal it", containers[0].Sandbox.ID)
   868  			}
   869  
   870  			if err := containers[0].Destroy(); err != nil {
   871  				t.Errorf("failed to destroy container: %v", err)
   872  			}
   873  		})
   874  	}
   875  }
   876  
   877  // TestMultiContainerDestroy checks that containers are properly cleaned up when
   878  // they are destroyed.
   879  func TestMultiContainerDestroy(t *testing.T) {
   880  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
   881  	if err != nil {
   882  		t.Fatal("error finding test_app:", err)
   883  	}
   884  
   885  	for name, conf := range configs(t, false /* noOverlay */) {
   886  		t.Run(name, func(t *testing.T) {
   887  			rootDir, cleanup, err := testutil.SetupRootDir()
   888  			if err != nil {
   889  				t.Fatalf("error creating root dir: %v", err)
   890  			}
   891  			defer cleanup()
   892  			conf.RootDir = rootDir
   893  
   894  			// First container will remain intact while the second container is killed.
   895  			podSpecs, ids := createSpecs(
   896  				sleepCmd,
   897  				[]string{app, "fork-bomb"})
   898  
   899  			// Run the fork bomb in a PID namespace to prevent processes from being
   900  			// re-parented to PID=1 in the root container.
   901  			podSpecs[1].Linux = &specs.Linux{
   902  				Namespaces: []specs.LinuxNamespace{{Type: "pid"}},
   903  			}
   904  			containers, cleanup, err := startContainers(conf, podSpecs, ids)
   905  			if err != nil {
   906  				t.Fatalf("error starting containers: %v", err)
   907  			}
   908  			defer cleanup()
   909  
   910  			// Exec more processes to ensure that 'signal all' works for exec'd processes too.
   911  			args := &control.ExecArgs{
   912  				Filename: app,
   913  				Argv:     []string{app, "fork-bomb"},
   914  			}
   915  			if _, err := containers[1].Execute(conf, args); err != nil {
   916  				t.Fatalf("error exec'ing: %v", err)
   917  			}
   918  
   919  			// Let it brew...
   920  			time.Sleep(500 * time.Millisecond)
   921  
   922  			if err := containers[1].Destroy(); err != nil {
   923  				t.Fatalf("error destroying container: %v", err)
   924  			}
   925  
   926  			// Check that destroy killed all processes belonging to the container and
   927  			// waited for them to exit before returning.
   928  			pss, err := containers[0].Sandbox.Processes("")
   929  			if err != nil {
   930  				t.Fatalf("error getting process data from sandbox: %v", err)
   931  			}
   932  			expectedPL := []*control.Process{
   933  				newProcessBuilder().PID(1).Cmd("sleep").Process(),
   934  			}
   935  			if !procListsEqual(pss, expectedPL) {
   936  				t.Errorf("container got process list: %s, want: %s: error: %v",
   937  					procListToString(pss), procListToString(expectedPL), err)
   938  			}
   939  
   940  			// Check that cont.Destroy is safe to call multiple times.
   941  			if err := containers[1].Destroy(); err != nil {
   942  				t.Errorf("error destroying container: %v", err)
   943  			}
   944  		})
   945  	}
   946  }
   947  
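        // TestMultiContainerProcesses checks that each container reports only its own
        // processes, including processes created via exec.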
   948  func TestMultiContainerProcesses(t *testing.T) {
   949  	rootDir, cleanup, err := testutil.SetupRootDir()
   950  	if err != nil {
   951  		t.Fatalf("error creating root dir: %v", err)
   952  	}
   953  	defer cleanup()
   954  
   955  	conf := testutil.TestConfig(t)
   956  	conf.RootDir = rootDir
   957  
   958  	// Note: use curly braces to keep 'sh' process around. Otherwise, shell
   959  	// will just execve into 'sleep' and both containers will look the
   960  	// same.
   961  	specs, ids := createSpecs(
   962  		sleepCmd,
   963  		[]string{"sh", "-c", "{ sleep 1000; }"})
   964  	containers, cleanup, err := startContainers(conf, specs, ids)
   965  	if err != nil {
   966  		t.Fatalf("error starting containers: %v", err)
   967  	}
   968  	defer cleanup()
   969  
   970  	// Check that the root container's process list doesn't include other containers.
   971  	expectedPL0 := []*control.Process{
   972  		newProcessBuilder().PID(1).Cmd("sleep").Process(),
   973  	}
   974  	if err := waitForProcessList(containers[0], expectedPL0); err != nil {
   975  		t.Errorf("failed to wait for process to start: %v", err)
   976  	}
   977  
   978  	// Same for the other container.
   979  	expectedPL1 := []*control.Process{
   980  		newProcessBuilder().PID(2).Cmd("sh").Process(),
   981  		newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process(),
   982  	}
   983  	if err := waitForProcessList(containers[1], expectedPL1); err != nil {
   984  		t.Errorf("failed to wait for process to start: %v", err)
   985  	}
   986  
   987  	// Now exec into the second container and verify that the new process shows up in it.
   988  	args := &control.ExecArgs{
   989  		Filename: sleepCmd[0],
   990  		Argv:     sleepCmd,
   991  	}
   992  	if _, err := containers[1].Execute(conf, args); err != nil {
   993  		t.Fatalf("error exec'ing: %v", err)
   994  	}
   995  	expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd("sleep").Process())
   996  	if err := waitForProcessList(containers[1], expectedPL1); err != nil {
   997  		t.Errorf("failed to wait for process to start: %v", err)
   998  	}
   999  	// Root container should remain unchanged.
  1000  	if err := waitForProcessList(containers[0], expectedPL0); err != nil {
  1001  		t.Errorf("failed to wait for process to start: %v", err)
  1002  	}
  1003  }
  1004  
  1005  // TestMultiContainerKillAll checks that all processes that belong to a container
  1006  // are killed when SIGKILL is sent to *all* processes in that container.
  1007  func TestMultiContainerKillAll(t *testing.T) {
  1008  	rootDir, cleanup, err := testutil.SetupRootDir()
  1009  	if err != nil {
  1010  		t.Fatalf("error creating root dir: %v", err)
  1011  	}
  1012  	defer cleanup()
  1013  
  1014  	conf := testutil.TestConfig(t)
  1015  	conf.RootDir = rootDir
  1016  
  1017  	for _, tc := range []struct {
  1018  		killContainer bool
  1019  	}{
  1020  		{killContainer: true},
  1021  		{killContainer: false},
  1022  	} {
  1023  		app, err := testutil.FindFile("test/cmd/test_app/test_app")
  1024  		if err != nil {
  1025  			t.Fatal("error finding test_app:", err)
  1026  		}
  1027  
  1028  		// First container will remain intact while the second container is killed.
  1029  		specs, ids := createSpecs(
  1030  			[]string{app, "task-tree", "--depth=2", "--width=2"},
  1031  			[]string{app, "task-tree", "--depth=4", "--width=2"})
  1032  		containers, cleanup, err := startContainers(conf, specs, ids)
  1033  		if err != nil {
  1034  			t.Fatalf("error starting containers: %v", err)
  1035  		}
  1036  		defer cleanup()
  1037  
  1038  		// Wait until all processes are created.
  1039  		rootProcCount := int(math.Pow(2, 3) - 1)
  1040  		if err := waitForProcessCount(containers[0], rootProcCount); err != nil {
  1041  			t.Fatalf("error waiting for processes: %v", err)
  1042  		}
  1043  		procCount := int(math.Pow(2, 5) - 1)
  1044  		if err := waitForProcessCount(containers[1], procCount); err != nil {
  1045  			t.Fatalf("error waiting for processes: %v", err)
  1046  		}
  1047  
  1048  		// Exec more processes to ensure signal works for exec'd processes too.
  1049  		args := &control.ExecArgs{
  1050  			Filename: app,
  1051  			Argv:     []string{app, "task-tree", "--depth=2", "--width=2"},
  1052  		}
  1053  		if _, err := containers[1].Execute(conf, args); err != nil {
  1054  			t.Fatalf("error exec'ing: %v", err)
  1055  		}
  1056  		// Wait for these new processes to start.
  1057  		procCount += int(math.Pow(2, 3) - 1)
  1058  		if err := waitForProcessCount(containers[1], procCount); err != nil {
  1059  			t.Fatalf("error waiting for processes: %v", err)
  1060  		}
  1061  
  1062  		if tc.killContainer {
  1063  			// First kill the init process so the container is stopped while processes
  1064  			// are still running inside it.
  1065  			if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
  1066  				t.Fatalf("SignalContainer(): %v", err)
  1067  			}
  1068  			op := func() error {
  1069  				c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
  1070  				if err != nil {
  1071  					return err
  1072  				}
  1073  				if c.Status != Stopped {
  1074  					return fmt.Errorf("container is not stopped")
  1075  				}
  1076  				return nil
  1077  			}
  1078  			if err := testutil.Poll(op, 5*time.Second); err != nil {
  1079  				t.Fatalf("container did not stop %q: %v", containers[1].ID, err)
  1080  			}
  1081  		}
  1082  
  1083  		c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
  1084  		if err != nil {
  1085  			t.Fatalf("failed to load child container %q: %v", ids[1], err)
  1086  		}
  1087  		// Kill'Em All
  1088  		if err := c.SignalContainer(unix.SIGKILL, true); err != nil {
  1089  			t.Fatalf("failed to send SIGKILL to container %q: %v", c.ID, err)
  1090  		}
  1091  
  1092  		// Check that all processes are gone.
  1093  		if err := waitForProcessCount(containers[1], 0); err != nil {
  1094  			t.Fatalf("error waiting for processes: %v", err)
  1095  		}
  1096  		// Check that root container was not affected.
  1097  		if err := waitForProcessCount(containers[0], rootProcCount); err != nil {
  1098  			t.Fatalf("error waiting for processes: %v", err)
  1099  		}
  1100  	}
  1101  }
  1102  
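        // TestMultiContainerDestroyNotStarted checks that a subcontainer that was
        // created but never started can still be destroyed.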
  1103  func TestMultiContainerDestroyNotStarted(t *testing.T) {
  1104  	specs, ids := createSpecs(sleepCmd, sleepCmd)
  1105  	conf := testutil.TestConfig(t)
  1106  	_, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf)
  1107  	if err != nil {
  1108  		t.Fatalf("error setting up container: %v", err)
  1109  	}
  1110  	defer cleanup()
  1111  
  1112  	rootArgs := Args{
  1113  		ID:        ids[0],
  1114  		Spec:      specs[0],
  1115  		BundleDir: bundleDir,
  1116  	}
  1117  	root, err := New(conf, rootArgs)
  1118  	if err != nil {
  1119  		t.Fatalf("error creating root container: %v", err)
  1120  	}
  1121  	defer root.Destroy()
  1122  	if err := root.Start(conf); err != nil {
  1123  		t.Fatalf("error starting root container: %v", err)
  1124  	}
  1125  
  1126  	// Create and destroy sub-container.
  1127  	bundleDir, cleanupSub, err := testutil.SetupBundleDir(specs[1])
  1128  	if err != nil {
  1129  		t.Fatalf("error setting up container: %v", err)
  1130  	}
  1131  	defer cleanupSub()
  1132  
  1133  	args := Args{
  1134  		ID:        ids[1],
  1135  		Spec:      specs[1],
  1136  		BundleDir: bundleDir,
  1137  	}
  1138  	cont, err := New(conf, args)
  1139  	if err != nil {
  1140  		t.Fatalf("error creating container: %v", err)
  1141  	}
  1142  
  1143  	// Check that container can be destroyed.
  1144  	if err := cont.Destroy(); err != nil {
  1145  		t.Fatalf("deleting non-started container failed: %v", err)
  1146  	}
  1147  }
  1148  
  1149  // TestMultiContainerDestroyStarting attempts to force a race between start
  1150  // and destroy.
  1151  func TestMultiContainerDestroyStarting(t *testing.T) {
  1152  	cmds := make([][]string, 10)
  1153  	for i := range cmds {
  1154  		cmds[i] = sleepCmd
  1155  	}
  1156  	specs, ids := createSpecs(cmds...)
  1157  
  1158  	conf := testutil.TestConfig(t)
  1159  	rootDir, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf)
  1160  	if err != nil {
  1161  		t.Fatalf("error setting up container: %v", err)
  1162  	}
  1163  	defer cleanup()
  1164  
  1165  	rootArgs := Args{
  1166  		ID:        ids[0],
  1167  		Spec:      specs[0],
  1168  		BundleDir: bundleDir,
  1169  	}
  1170  	root, err := New(conf, rootArgs)
  1171  	if err != nil {
  1172  		t.Fatalf("error creating root container: %v", err)
  1173  	}
  1174  	defer root.Destroy()
  1175  	if err := root.Start(conf); err != nil {
  1176  		t.Fatalf("error starting root container: %v", err)
  1177  	}
  1178  
  1179  	wg := sync.WaitGroup{}
  1180  	for i := range cmds {
  1181  		if i == 0 {
  1182  			continue // skip root container
  1183  		}
  1184  
  1185  		bundleDir, cleanup, err := testutil.SetupBundleDir(specs[i])
  1186  		if err != nil {
  1187  			t.Fatalf("error setting up container: %v", err)
  1188  		}
  1189  		defer cleanup()
  1190  
  1191  		rootArgs := Args{
  1192  			ID:        ids[i],
  1193  			Spec:      specs[i],
  1194  			BundleDir: bundleDir,
  1195  		}
  1196  		cont, err := New(conf, rootArgs)
  1197  		if err != nil {
  1198  			t.Fatalf("error creating container: %v", err)
  1199  		}
  1200  
  1201  		// Container is not thread safe, so load another instance to run
  1202  		// concurrently.
  1203  		startCont, err := Load(rootDir, FullID{ContainerID: ids[i]}, LoadOpts{})
  1204  		if err != nil {
  1205  			t.Fatalf("error loading container: %v", err)
  1206  		}
  1207  		wg.Add(1)
  1208  		go func() {
  1209  			defer wg.Done()
  1210  			// Ignore failures, start can fail if destroy runs first.
  1211  			_ = startCont.Start(conf)
  1212  		}()
  1213  
  1214  		wg.Add(1)
  1215  		go func() {
  1216  			defer wg.Done()
  1217  			if err := cont.Destroy(); err != nil {
  1218  				t.Errorf("deleting non-started container failed: %v", err)
  1219  			}
  1220  		}()
  1221  	}
  1222  	wg.Wait()
  1223  }
  1224  
  1225  // TestMultiContainerDifferentFilesystems tests that different containers have
  1226  // different root filesystems.
  1227  func TestMultiContainerDifferentFilesystems(t *testing.T) {
  1228  	filename := "/foo"
  1229  	// Root container will create file and then sleep.
  1230  	cmdRoot := []string{"sh", "-c", fmt.Sprintf("touch %q && sleep 100", filename)}
  1231  
  1232  	// Child containers will assert that the file does not exist, and will
  1233  	// then create it.
  1234  	script := fmt.Sprintf("if [ -f %q ]; then exit 1; else touch %q; fi", filename, filename)
  1235  	cmd := []string{"sh", "-c", script}
  1236  
  1237  	rootDir, cleanup, err := testutil.SetupRootDir()
  1238  	if err != nil {
  1239  		t.Fatalf("error creating root dir: %v", err)
  1240  	}
  1241  	defer cleanup()
  1242  
  1243  	conf := testutil.TestConfig(t)
  1244  	conf.RootDir = rootDir
  1245  
  1246  	// Make sure overlay is enabled, and none of the root filesystems are
  1247  	// read-only, otherwise we won't be able to create the file.
  1248  	conf.Overlay2.Set("all:memory")
  1249  	specs, ids := createSpecs(cmdRoot, cmd, cmd)
  1250  	for _, s := range specs {
  1251  		s.Root.Readonly = false
  1252  	}
  1253  
  1254  	containers, cleanup, err := startContainers(conf, specs, ids)
  1255  	if err != nil {
  1256  		t.Fatalf("error starting containers: %v", err)
  1257  	}
  1258  	defer cleanup()
  1259  
  1260  	// Both child containers should exit successfully.
  1261  	for i, c := range containers {
  1262  		if i == 0 {
  1263  			// Don't wait on the root.
  1264  			continue
  1265  		}
  1266  		if ws, err := c.Wait(); err != nil {
  1267  			t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
  1268  		} else if es := ws.ExitStatus(); es != 0 {
  1269  			t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
  1270  		}
  1271  	}
  1272  }
  1273  
  1274  // TestMultiContainerContainerDestroyStress tests that IO operations continue
  1275  // to work after containers have been stopped and gofers killed.
  1276  func TestMultiContainerContainerDestroyStress(t *testing.T) {
  1277  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
  1278  	if err != nil {
  1279  		t.Fatal("error finding test_app:", err)
  1280  	}
  1281  
  1282  	// Set up containers. The root container just reaps children, while the others
  1283  	// perform some IO. Children are executed in 3 batches of 10. Within a batch
  1284  	// there is overlap between containers starting and being destroyed. In between
  1285  	// batches, all containers stop before another batch starts.
  1286  	cmds := [][]string{{app, "reaper"}}
  1287  	const batchSize = 10
  1288  	for i := 0; i < 3*batchSize; i++ {
  1289  		dir, err := ioutil.TempDir(testutil.TmpDir(), "gofer-stop-test")
  1290  		if err != nil {
  1291  			t.Fatal("ioutil.TempDir failed:", err)
  1292  		}
  1293  		defer os.RemoveAll(dir)
  1294  
  1295  		cmd := "find /bin -type f | head | xargs -I SRC cp SRC " + dir
  1296  		cmds = append(cmds, []string{"sh", "-c", cmd})
  1297  	}
  1298  	allSpecs, allIDs := createSpecs(cmds...)
  1299  
  1300  	// Split up the specs and IDs.
  1301  	rootSpec := allSpecs[0]
  1302  	rootID := allIDs[0]
  1303  	childrenSpecs := allSpecs[1:]
  1304  	childrenIDs := allIDs[1:]
  1305  
  1306  	conf := testutil.TestConfig(t)
  1307  	_, bundleDir, cleanup, err := testutil.SetupContainer(rootSpec, conf)
  1308  	if err != nil {
  1309  		t.Fatalf("error setting up container: %v", err)
  1310  	}
  1311  	defer cleanup()
  1312  
  1313  	// Start root container.
  1314  	rootArgs := Args{
  1315  		ID:        rootID,
  1316  		Spec:      rootSpec,
  1317  		BundleDir: bundleDir,
  1318  	}
  1319  	root, err := New(conf, rootArgs)
  1320  	if err != nil {
  1321  		t.Fatalf("error creating root container: %v", err)
  1322  	}
  1323  	if err := root.Start(conf); err != nil {
  1324  		t.Fatalf("error starting root container: %v", err)
  1325  	}
  1326  	defer root.Destroy()
  1327  
  1328  	// Run batches. Each batch starts containers in parallel, then waits for and
  1329  	// destroys them before starting another batch.
  1330  	for i := 0; i < len(childrenSpecs); i += batchSize {
  1331  		t.Logf("Starting batch from %d to %d", i, i+batchSize)
  1332  		specs := childrenSpecs[i : i+batchSize]
  1333  		ids := childrenIDs[i : i+batchSize]
  1334  
  1335  		var children []*Container
  1336  		for j, spec := range specs {
  1337  			bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
  1338  			if err != nil {
  1339  				t.Fatalf("error setting up container: %v", err)
  1340  			}
  1341  			defer cleanup()
  1342  
  1343  			args := Args{
  1344  				ID:        ids[j],
  1345  				Spec:      spec,
  1346  				BundleDir: bundleDir,
  1347  			}
  1348  			child, err := New(conf, args)
  1349  			if err != nil {
  1350  				t.Fatalf("error creating container: %v", err)
  1351  			}
  1352  			children = append(children, child)
  1353  
  1354  			if err := child.Start(conf); err != nil {
  1355  				t.Fatalf("error starting container: %v", err)
  1356  			}
  1357  
  1358  			// Give a small gap between containers.
  1359  			time.Sleep(50 * time.Millisecond)
  1360  		}
  1361  		for _, child := range children {
  1362  			ws, err := child.Wait()
  1363  			if err != nil {
  1364  				t.Fatalf("waiting for container: %v", err)
  1365  			}
  1366  			if !ws.Exited() || ws.ExitStatus() != 0 {
  1367  				t.Fatalf("container failed, waitStatus: %x (%d)", ws, ws.ExitStatus())
  1368  			}
  1369  			if err := child.Destroy(); err != nil {
  1370  				t.Fatalf("error destroying container: %v", err)
  1371  			}
  1372  		}
  1373  	}
  1374  }
  1375  
  1376  // Test that pod shared mounts are properly mounted in 2 containers and that
  1377  // changes from one container are reflected in the other.
  1378  func TestMultiContainerSharedMount(t *testing.T) {
  1379  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1380  		// Set up the containers.
  1381  		podSpec, ids := createSpecs(sleepCmd, sleepCmd)
  1382  		mnt0 := specs.Mount{
  1383  			Destination: "/mydir/test",
  1384  			Source:      sourceDir,
  1385  			Type:        mntType,
  1386  			Options:     nil,
  1387  		}
  1388  		podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
  1389  
  1390  		mnt1 := mnt0
  1391  		mnt1.Destination = "/mydir2/test2"
  1392  		podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
  1393  
  1394  		createSharedMount(mnt0, "test-mount", podSpec...)
  1395  
  1396  		containers, cleanup, err := startContainers(conf, podSpec, ids)
  1397  		if err != nil {
  1398  			t.Fatalf("error starting containers: %v", err)
  1399  		}
  1400  		defer cleanup()
  1401  
  1402  		file0 := path.Join(mnt0.Destination, "abc")
  1403  		file1 := path.Join(mnt1.Destination, "abc")
  1404  		execs := []execDesc{
  1405  			{
  1406  				c:    containers[0],
  1407  				cmd:  []string{"/usr/bin/test", "-d", mnt0.Destination},
  1408  				name: "directory is mounted in container0",
  1409  			},
  1410  			{
  1411  				c:    containers[1],
  1412  				cmd:  []string{"/usr/bin/test", "-d", mnt1.Destination},
  1413  				name: "directory is mounted in container1",
  1414  			},
  1415  			{
  1416  				c:    containers[0],
  1417  				cmd:  []string{"/bin/touch", file0},
  1418  				name: "create file in container0",
  1419  			},
  1420  			{
  1421  				c:    containers[0],
  1422  				cmd:  []string{"/usr/bin/test", "-f", file0},
  1423  				name: "file appears in container0",
  1424  			},
  1425  			{
  1426  				c:    containers[1],
  1427  				cmd:  []string{"/usr/bin/test", "-f", file1},
  1428  				name: "file appears in container1",
  1429  			},
  1430  			{
  1431  				c:    containers[1],
  1432  				cmd:  []string{"/bin/rm", file1},
  1433  				name: "remove file from container1",
  1434  			},
  1435  			{
  1436  				c:    containers[0],
  1437  				cmd:  []string{"/usr/bin/test", "!", "-f", file0},
  1438  				name: "file removed from container0",
  1439  			},
  1440  			{
  1441  				c:    containers[1],
  1442  				cmd:  []string{"/usr/bin/test", "!", "-f", file1},
  1443  				name: "file removed from container1",
  1444  			},
  1445  			{
  1446  				c:    containers[1],
  1447  				cmd:  []string{"/bin/mkdir", file1},
  1448  				name: "create directory in container1",
  1449  			},
  1450  			{
  1451  				c:    containers[0],
  1452  				cmd:  []string{"/usr/bin/test", "-d", file0},
  1453  				name: "dir appears in container0",
  1454  			},
  1455  			{
  1456  				c:    containers[1],
  1457  				cmd:  []string{"/usr/bin/test", "-d", file1},
  1458  				name: "dir appears in container1",
  1459  			},
  1460  			{
  1461  				c:    containers[0],
  1462  				cmd:  []string{"/bin/rmdir", file0},
  1463  				name: "remove directory from container0",
  1464  			},
  1465  			{
  1466  				c:    containers[0],
  1467  				cmd:  []string{"/usr/bin/test", "!", "-d", file0},
  1468  				name: "dir removed from container0",
  1469  			},
  1470  			{
  1471  				c:    containers[1],
  1472  				cmd:  []string{"/usr/bin/test", "!", "-d", file1},
  1473  				name: "dir removed from container1",
  1474  			},
  1475  		}
  1476  		execMany(t, conf, execs)
  1477  	})
  1478  }
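        
        // createSharedMountSketch is an illustrative, hypothetical variant of the
        // createSharedMount helper used by these tests (the real helper is defined
        // earlier in this file). It only sketches the general idea: every spec in the
        // pod is annotated with the same mount name so that runsc can treat the mount
        // as shared at the pod level. The annotation key prefix below is an assumption
        // for illustration and may not match what runsc actually consumes.
        func createSharedMountSketch(mnt specs.Mount, name string, pod ...*specs.Spec) {
        	for _, spec := range pod {
        		if spec.Annotations == nil {
        			spec.Annotations = map[string]string{}
        		}
        		// Hypothetical annotation keys; see the real createSharedMount for the
        		// keys runsc expects.
        		prefix := "dev.gvisor.spec.mount." + name + "."
        		spec.Annotations[prefix+"source"] = mnt.Source
        		spec.Annotations[prefix+"type"] = mnt.Type
        		spec.Annotations[prefix+"share"] = "pod"
        		if len(mnt.Options) > 0 {
        			spec.Annotations[prefix+"options"] = strings.Join(mnt.Options, ",")
        		}
        	}
        }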
  1479  
  1480  // Test that pod mounts are mounted as readonly when requested.
  1481  func TestMultiContainerSharedMountReadonly(t *testing.T) {
  1482  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1483  		// Set up the containers.
  1484  		podSpec, ids := createSpecs(sleepCmd, sleepCmd)
  1485  		mnt0 := specs.Mount{
  1486  			Destination: "/mydir/test",
  1487  			Source:      sourceDir,
  1488  			Type:        mntType,
  1489  			Options:     []string{"ro"},
  1490  		}
  1491  		podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
  1492  
  1493  		mnt1 := mnt0
  1494  		mnt1.Destination = "/mydir2/test2"
  1495  		podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
  1496  
  1497  		createSharedMount(mnt0, "test-mount", podSpec...)
  1498  
  1499  		containers, cleanup, err := startContainers(conf, podSpec, ids)
  1500  		if err != nil {
  1501  			t.Fatalf("error starting containers: %v", err)
  1502  		}
  1503  		defer cleanup()
  1504  
  1505  		file0 := path.Join(mnt0.Destination, "abc")
  1506  		file1 := path.Join(mnt1.Destination, "abc")
  1507  		execs := []execDesc{
  1508  			{
  1509  				c:    containers[0],
  1510  				cmd:  []string{"/usr/bin/test", "-d", mnt0.Destination},
  1511  				name: "directory is mounted in container0",
  1512  			},
  1513  			{
  1514  				c:    containers[1],
  1515  				cmd:  []string{"/usr/bin/test", "-d", mnt1.Destination},
  1516  				name: "directory is mounted in container1",
  1517  			},
  1518  			{
  1519  				c:    containers[0],
  1520  				cmd:  []string{"/bin/touch", file0},
  1521  				want: 1,
  1522  				name: "fails to write to container0",
  1523  			},
  1524  			{
  1525  				c:    containers[1],
  1526  				cmd:  []string{"/bin/touch", file1},
  1527  				want: 1,
  1528  				name: "fails to write to container1",
  1529  			},
  1530  		}
  1531  		execMany(t, conf, execs)
  1532  	})
  1533  }
  1534  
  1535  // Test that a container can mount a pod-shared mount with more restrictive
  1536  // options than the ones declared at the pod level, and that they are enforced.
  1537  func TestMultiContainerSharedMountCompatible(t *testing.T) {
  1538  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1539  		podSpec, ids := createSpecs(sleepCmd, sleepCmd)
  1540  
  1541  		// Init container and annotations allow read-write and exec.
  1542  		mnt0 := specs.Mount{
  1543  			Destination: "/mydir/test",
  1544  			Source:      sourceDir,
  1545  			Type:        mntType,
  1546  			Options:     []string{"rw", "exec"},
  1547  		}
  1548  		podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
  1549  
  1550  		// While subcontainer mount has more restrictive options: read-only, noexec.
  1551  		mnt1 := mnt0
  1552  		mnt1.Destination = "/mydir2/test2"
  1553  		mnt1.Options = []string{"ro", "noexec"}
  1554  		podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
  1555  
  1556  		createSharedMount(mnt0, "test-mount", podSpec...)
  1557  
  1558  		containers, cleanup, err := startContainers(conf, podSpec, ids)
  1559  		if err != nil {
  1560  			t.Fatalf("error starting containers: %v", err)
  1561  		}
  1562  		defer cleanup()
  1563  
  1564  		execs := []execDesc{
  1565  			{
  1566  				c:    containers[1],
  1567  				cmd:  []string{"/bin/touch", path.Join(mnt1.Destination, "fail")},
  1568  				want: 1,
  1569  				name: "fails write to container1",
  1570  			},
  1571  			{
  1572  				c:    containers[0],
  1573  				cmd:  []string{"/bin/cp", "/usr/bin/test", mnt0.Destination},
  1574  				name: "writes to container0",
  1575  			},
  1576  			{
  1577  				c:    containers[1],
  1578  				cmd:  []string{"/usr/bin/test", "-f", path.Join(mnt1.Destination, "test")},
  1579  				name: "file appears in container1",
  1580  			},
  1581  			{
  1582  				c:    containers[0],
  1583  				cmd:  []string{path.Join(mnt0.Destination, "test"), "-d", mnt0.Destination},
  1584  				name: "container0 can execute",
  1585  			},
  1586  			{
  1587  				c:    containers[1],
  1588  				cmd:  []string{path.Join(mnt1.Destination, "test"), "-d", mnt1.Destination},
  1589  				err:  "permission denied",
  1590  				name: "container1 cannot execute",
  1591  			},
  1592  		}
  1593  		execMany(t, conf, execs)
  1594  	})
  1595  }
  1596  
  1597  // Test that shared pod mounts continue to work after a container is restarted.
  1598  func TestMultiContainerSharedMountRestart(t *testing.T) {
  1599  	for numSubConts := 1; numSubConts <= 2; numSubConts++ {
  1600  		testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1601  			// Set up the containers.
  1602  			var cmds [][]string
  1603  			for i := 0; i <= numSubConts; i++ {
  1604  				cmds = append(cmds, sleepCmd)
  1605  			}
  1606  			podSpec, ids := createSpecs(cmds...)
  1607  
  1608  			// Add a shared mount to all subcontainers.
  1609  			mnt := specs.Mount{
  1610  				Source:  sourceDir,
  1611  				Type:    mntType,
  1612  				Options: nil,
  1613  			}
  1614  			for i := 1; i <= numSubConts; i++ {
  1615  				mnt.Destination = fmt.Sprintf("/mydir/test%d", i)
  1616  				podSpec[i].Mounts = append(podSpec[i].Mounts, mnt)
  1617  			}
  1618  
  1619  			createSharedMount(mnt, "test-mount", podSpec...)
  1620  
  1621  			containers, cleanup, err := startContainers(conf, podSpec, ids)
  1622  			if err != nil {
  1623  				t.Fatalf("error starting containers: %v", err)
  1624  			}
  1625  			defer cleanup()
  1626  
  1627  			// Create file in first subcontainer.
  1628  			file1 := "/mydir/test1/abc"
  1629  			execs := []execDesc{
  1630  				{
  1631  					c:    containers[1],
  1632  					cmd:  []string{"/bin/touch", file1},
  1633  					name: "create file in container1",
  1634  				},
  1635  			}
  1636  			// Check it appears in all subcontainers.
  1637  			for i := 1; i <= numSubConts; i++ {
  1638  				fileName := fmt.Sprintf("/mydir/test%d/abc", i)
  1639  				execs = append(execs, execDesc{
  1640  					c:    containers[i],
  1641  					cmd:  []string{"/usr/bin/test", "-f", fileName},
  1642  					name: fmt.Sprintf("file appears in container%d", i),
  1643  				})
  1644  			}
  1645  			execMany(t, conf, execs)
  1646  
  1647  			// Restart first subcontainer.
  1648  			containers[1].Destroy()
  1649  
  1650  			bundleDir, cleanup, err := testutil.SetupBundleDir(podSpec[1])
  1651  			if err != nil {
  1652  				t.Fatalf("error restarting container: %v", err)
  1653  			}
  1654  			defer cleanup()
  1655  
  1656  			args := Args{
  1657  				ID:        ids[1],
  1658  				Spec:      podSpec[1],
  1659  				BundleDir: bundleDir,
  1660  			}
  1661  			containers[1], err = New(conf, args)
  1662  			if err != nil {
  1663  				t.Fatalf("error creating container: %v", err)
  1664  			}
  1665  			if err := containers[1].Start(conf); err != nil {
  1666  				t.Fatalf("error starting container: %v", err)
  1667  			}
  1668  
  1669  			// Ensure that the file exists in all subcontainers.
  1670  			execs = nil
  1671  			for i := 1; i <= numSubConts; i++ {
  1672  				fileName := fmt.Sprintf("/mydir/test%d/abc", i)
  1673  				execs = append(execs, execDesc{
  1674  					c:    containers[i],
  1675  					cmd:  []string{"/usr/bin/test", "-f", fileName},
  1676  					name: fmt.Sprintf("file appears in container%d", i),
  1677  				})
  1678  			}
  1679  			execMany(t, conf, execs)
  1680  		})
  1681  	}
  1682  }
  1683  
  1684  // Test that unsupported pod mount options are ignored when matching master and
  1685  // replica mounts.
  1686  func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {
  1687  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1688  		// Set up the containers.
  1689  		podSpec, ids := createSpecs(sleepCmd, sleepCmd)
  1690  		mnt0 := specs.Mount{
  1691  			Destination: "/mydir/test",
  1692  			Source:      sourceDir,
  1693  			Type:        mntType,
  1694  			Options:     []string{"rw", "relatime"},
  1695  		}
  1696  		podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
  1697  
  1698  		mnt1 := mnt0
  1699  		mnt1.Destination = "/mydir2/test2"
  1700  		mnt1.Options = []string{"rw", "nosuid"}
  1701  		podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
  1702  
  1703  		createSharedMount(mnt0, "test-mount", podSpec...)
  1704  
  1705  		containers, cleanup, err := startContainers(conf, podSpec, ids)
  1706  		if err != nil {
  1707  			t.Fatalf("error starting containers: %v", err)
  1708  		}
  1709  		defer cleanup()
  1710  
  1711  		execs := []execDesc{
  1712  			{
  1713  				c:    containers[0],
  1714  				cmd:  []string{"/usr/bin/test", "-d", mnt0.Destination},
  1715  				name: "directory is mounted in container0",
  1716  			},
  1717  			{
  1718  				c:    containers[1],
  1719  				cmd:  []string{"/usr/bin/test", "-d", mnt1.Destination},
  1720  				name: "directory is mounted in container1",
  1721  			},
  1722  		}
  1723  		execMany(t, conf, execs)
  1724  	})
  1725  }
  1726  
  1727  // Test that shared mounts can be repeated within a container.
  1728  func TestMultiContainerSharedMountsRepeated(t *testing.T) {
  1729  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1730  		// Set up the containers.
  1731  		podSpec, ids := createSpecs(sleepCmd)
  1732  		mnt0 := specs.Mount{
  1733  			Destination: "/mydir/test1",
  1734  			Source:      sourceDir,
  1735  			Type:        mntType,
  1736  			Options:     []string{"rw", "relatime"},
  1737  		}
  1738  		mnt1 := specs.Mount{
  1739  			Destination: "/mydir/test2",
  1740  			Source:      sourceDir,
  1741  			Type:        mntType,
  1742  			Options:     []string{"ro"},
  1743  		}
  1744  		podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0, mnt1)
  1745  
  1746  		// Set annotations using less-restrictive mnt0.
  1747  		createSharedMount(mnt0, "test-mount", podSpec...)
  1748  
  1749  		containers, cleanup, err := startContainers(conf, podSpec, ids)
  1750  		if err != nil {
  1751  			t.Fatalf("error starting containers: %v", err)
  1752  		}
  1753  		defer cleanup()
  1754  
  1755  		execs := []execDesc{
  1756  			{
  1757  				c:    containers[0],
  1758  				cmd:  []string{"/bin/touch", path.Join(mnt1.Destination, "fail")},
  1759  				want: 1,
  1760  				name: "fails write to read-only mount",
  1761  			},
  1762  			{
  1763  				c:    containers[0],
  1764  				cmd:  []string{"/bin/cp", "/usr/bin/test", mnt0.Destination},
  1765  				name: "writes to writable mount",
  1766  			},
  1767  			{
  1768  				c:    containers[0],
  1769  				cmd:  []string{"/usr/bin/test", "-f", path.Join(mnt1.Destination, "test")},
  1770  				name: "file appears in read-only mount",
  1771  			},
  1772  			{
  1773  				c:    containers[0],
  1774  				cmd:  []string{"/usr/bin/test", "-f", path.Join(mnt0.Destination, "test")},
  1775  				name: "file appears in writable mount",
  1776  			},
  1777  		}
  1778  		execMany(t, conf, execs)
  1779  	})
  1780  }
  1781  
  1782  // This test checks that a "shared" bind mount is correctly overlaid with a
  1783  // self-backed tmpfs (backed by a filestore file inside the mount's source dir).
  1784  func TestMultiContainerSharedBindMount(t *testing.T) {
  1785  	for numContainers := 1; numContainers <= 2; numContainers++ {
  1786  		testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1787  			if mntType != "bind" {
  1788  				t.Skipf("This test is only for shared bind mounts, skipping %q mount type", mntType)
  1789  			}
  1790  			t.Run(fmt.Sprintf("containers-%d", numContainers), func(t *testing.T) {
  1791  				// Set up the containers.
  1792  				var cmds [][]string
  1793  				for i := 0; i < numContainers; i++ {
  1794  					cmds = append(cmds, sleepCmd)
  1795  				}
  1796  				podSpec, ids := createSpecs(cmds...)
  1797  
  1798  				sharedMount := specs.Mount{
  1799  					Destination: "/mydir/test",
  1800  					Source:      sourceDir,
  1801  					Type:        mntType,
  1802  				}
  1803  				for _, spec := range podSpec {
  1804  					spec.Mounts = append(spec.Mounts, sharedMount)
  1805  				}
  1806  
  1807  				createSharedMount(sharedMount, "test-mount", podSpec...)
  1808  
  1809  				containers, cleanup, err := startContainers(conf, podSpec, ids)
  1810  				if err != nil {
  1811  					t.Fatalf("error starting containers: %v", err)
  1812  				}
  1813  				destroyed := false
  1814  				destroy := func() {
  1815  					if destroyed {
  1816  						return
  1817  					}
  1818  					destroyed = true
  1819  					cleanup()
  1820  				}
  1821  				defer destroy()
  1822  
  1823  				// Create a file in shared mount with a few bytes in each container.
  1824  				var execs []execDesc
  1825  				for i, c := range containers {
  1826  					testFileName := fmt.Sprintf("testfile-%d", i)
  1827  					testFilePath := path.Join(sharedMount.Destination, testFileName)
  1828  					execs = append(execs, execDesc{
  1829  						c:    c,
  1830  						cmd:  []string{"/bin/sh", "-c", "echo hello > " + testFilePath},
  1831  						name: fmt.Sprintf("file created in container %d", i),
  1832  					})
  1833  				}
  1834  				execMany(t, conf, execs)
  1835  
  1836  				// Check that the file is not created on the host.
  1837  				for i := 0; i < numContainers; i++ {
  1838  					testFileName := fmt.Sprintf("testfile-%d", i)
  1839  					if _, err := os.Stat(path.Join(sourceDir, testFileName)); err == nil {
  1840  						t.Errorf("%q file created on the host in spite of tmpfs", testFileName)
  1841  					}
  1842  				}
  1843  
  1844  				// Check that the filestore file is created and is not empty.
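        				// boot.SelfFilestorePath is expected to return the path of the filestore
        				// file (named after the sandbox ID) that backs this self-backed mount
        				// inside its source directory.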
  1845  				filestoreFile := boot.SelfFilestorePath(sourceDir, containers[0].sandboxID())
  1846  				var stat unix.Stat_t
  1847  				if err := unix.Stat(filestoreFile, &stat); err != nil {
  1848  					t.Fatalf("unix.Stat(%q) failed for submount filestore: %v", filestoreFile, err)
  1849  				}
  1850  				if stat.Blocks == 0 {
  1851  					t.Errorf("submount filestore file %q is empty", filestoreFile)
  1852  				}
  1853  
  1854  				// Ensure the shared mount is tmpfs.
  1855  				for i, c := range containers {
  1856  					got, err := executeCombinedOutput(conf, c, nil, "/bin/sh", "-c", "mount | grep "+sharedMount.Destination)
  1857  					if err != nil {
  1858  						t.Fatalf("failed to grep mount(1) from container %d: %v", i, err)
  1859  					}
  1860  					if !strings.Contains(string(got), "type tmpfs (rw)") {
  1861  						t.Errorf("expected %s to be a tmpfs mount in container %d. mount(1) reports its type as:\n%s", sharedMount.Destination, i, string(got))
  1862  					}
  1863  				}
  1864  
  1865  				// Destroying the containers should delete the filestore file.
  1866  				destroy()
  1867  				if err := unix.Stat(filestoreFile, &stat); err == nil {
  1868  					t.Fatalf("overlay filestore at %q was not deleted after container.Destroy()", filestoreFile)
  1869  				}
  1870  			})
  1871  		})
  1872  	}
  1873  }
  1874  
  1875  // Test that one container can send an FD to another container, even though
  1876  // they have distinct MountNamespaces.
  1877  func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {
  1878  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
  1879  	if err != nil {
  1880  		t.Fatal("error finding test_app:", err)
  1881  	}
  1882  
  1883  	testSharedMount(t, func(t *testing.T, conf *config.Config, sourceDir string, mntType string) {
  1884  		// We set up two containers with one shared mount that is used for a
  1885  		// shared socket. The first container will send an FD over the socket
  1886  		// to the second container. The FD corresponds to a file in the first
  1887  		// container's mount namespace that is not part of the second
  1888  		// container's mount namespace. However, the second container still
  1889  		// should be able to read the FD.
  1890  
  1891  		// Create a shared mount where we will put the socket.
  1892  		sharedMnt := specs.Mount{
  1893  			Destination: "/mydir/test",
  1894  			Type:        mntType,
  1895  			Source:      sourceDir,
  1896  		}
  1897  		socketPath := filepath.Join(sharedMnt.Destination, "socket")
  1898  
  1899  		// Create a writeable tmpfs mount where the FD sender app will create
  1900  		// files to send. This will only be mounted in the FD sender.
  1901  		writeableMnt := specs.Mount{
  1902  			Destination: "/tmp",
  1903  			Type:        "tmpfs",
  1904  		}
  1905  
  1906  		// Create the specs.
  1907  		specs, ids := createSpecs(
  1908  			sleepCmd,
  1909  			[]string{app, "fd_sender", "--socket", socketPath},
  1910  			[]string{app, "fd_receiver", "--socket", socketPath},
  1911  		)
  1912  		specs[1].Mounts = append(specs[1].Mounts, sharedMnt, writeableMnt)
  1913  		specs[2].Mounts = append(specs[2].Mounts, sharedMnt)
  1914  		createSharedMount(sharedMnt, "shared-mount", specs...)
  1915  
  1916  		containers, cleanup, err := startContainers(conf, specs, ids)
  1917  		if err != nil {
  1918  			t.Fatalf("error starting containers: %v", err)
  1919  		}
  1920  		defer cleanup()
  1921  
  1922  		// Both containers should exit successfully.
  1923  		for _, c := range containers[1:] {
  1924  			if ws, err := c.Wait(); err != nil {
  1925  				t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
  1926  			} else if es := ws.ExitStatus(); es != 0 {
  1927  				t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
  1928  			}
  1929  		}
  1930  	})
  1931  }
  1932  
  1933  // Test that the container is destroyed when its gofer is killed.
  1934  func TestMultiContainerGoferKilled(t *testing.T) {
  1935  	rootDir, cleanup, err := testutil.SetupRootDir()
  1936  	if err != nil {
  1937  		t.Fatalf("error creating root dir: %v", err)
  1938  	}
  1939  	defer cleanup()
  1940  
  1941  	conf := testutil.TestConfig(t)
  1942  	conf.RootDir = rootDir
  1943  
  1944  	specs, ids := createSpecs(sleepCmd, sleepCmd, sleepCmd)
  1945  	containers, cleanup, err := startContainers(conf, specs, ids)
  1946  	if err != nil {
  1947  		t.Fatalf("error starting containers: %v", err)
  1948  	}
  1949  	defer cleanup()
  1950  
  1951  	// Ensure the container is running.
  1952  	c := containers[2]
  1953  	expectedPL := []*control.Process{
  1954  		newProcessBuilder().PID(3).Cmd("sleep").Process(),
  1955  	}
  1956  	if err := waitForProcessList(c, expectedPL); err != nil {
  1957  		t.Errorf("failed to wait for sleep to start: %v", err)
  1958  	}
  1959  
  1960  	// Kill container's gofer.
  1961  	if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
  1962  		t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
  1963  	}
  1964  
  1965  	// Wait until container stops.
  1966  	if err := waitForProcessList(c, nil); err != nil {
  1967  		t.Errorf("Container %q was not stopped after gofer death: %v", c.ID, err)
  1968  	}
  1969  
  1970  	// Check that container isn't running anymore.
  1971  	if _, err := execute(conf, c, "/bin/true"); err == nil {
  1972  		t.Fatalf("Container %q was not stopped after gofer death", c.ID)
  1973  	}
  1974  
  1975  	// Check that other containers are unaffected.
  1976  	for i, c := range containers {
  1977  		if i == 2 {
  1978  			continue // container[2] has been killed.
  1979  		}
  1980  		pl := []*control.Process{
  1981  			newProcessBuilder().PID(kernel.ThreadID(i + 1)).Cmd("sleep").Process(),
  1982  		}
  1983  		if err := waitForProcessList(c, pl); err != nil {
  1984  			t.Errorf("Container %q was affected by another container: %v", c.ID, err)
  1985  		}
  1986  		if _, err := execute(conf, c, "/bin/true"); err != nil {
  1987  			t.Fatalf("Container %q was affected by another container: %v", c.ID, err)
  1988  		}
  1989  	}
  1990  
  1991  	// Kill root container's gofer to bring entire sandbox down.
  1992  	c = containers[0]
  1993  	if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
  1994  		t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
  1995  	}
  1996  
  1997  	// Wait until sandbox stops. waitForProcessList will loop until sandbox exits
  1998  	// and RPC errors out.
  1999  	impossiblePL := []*control.Process{
  2000  		newProcessBuilder().Cmd("non-existent-process").Process(),
  2001  	}
  2002  	if err := waitForProcessList(c, impossiblePL); err == nil {
  2003  		t.Fatalf("Sandbox was not killed after gofer death")
  2004  	}
  2005  
  2006  	// Check that entire sandbox isn't running anymore.
  2007  	for _, c := range containers {
  2008  		if _, err := execute(conf, c, "/bin/true"); err == nil {
  2009  			t.Fatalf("Container %q was not stopped after gofer death", c.ID)
  2010  		}
  2011  	}
  2012  }
  2013  
  2014  func TestMultiContainerLoadSandbox(t *testing.T) {
  2015  	specs, ids := createSpecs(sleepCmd, sleepCmd, sleepCmd)
  2016  
  2017  	rootDir, cleanup, err := testutil.SetupRootDir()
  2018  	if err != nil {
  2019  		t.Fatalf("error creating root dir: %v", err)
  2020  	}
  2021  	defer cleanup()
  2022  
  2023  	conf := testutil.TestConfig(t)
  2024  	conf.RootDir = rootDir
  2025  
  2026  	// Create containers for the sandbox.
  2027  	wants, cleanup, err := startContainers(conf, specs, ids)
  2028  	if err != nil {
  2029  		t.Fatalf("error starting containers: %v", err)
  2030  	}
  2031  	defer cleanup()
  2032  
  2033  	// Then create unrelated containers.
  2034  	for i := 0; i < 3; i++ {
  2035  		specs, ids = createSpecs(sleepCmd, sleepCmd, sleepCmd)
  2036  		_, cleanup, err = startContainers(conf, specs, ids)
  2037  		if err != nil {
  2038  			t.Fatalf("error starting containers: %v", err)
  2039  		}
  2040  		defer cleanup()
  2041  	}
  2042  
  2043  	// Create an unrelated directory under root.
  2044  	dir := filepath.Join(conf.RootDir, "not-a-container")
  2045  	if err := os.MkdirAll(dir, 0755); err != nil {
  2046  		t.Fatalf("os.MkdirAll(%q)=%v", dir, err)
  2047  	}
  2048  
  2049  	// Create a valid but empty container directory.
  2050  	randomCID := testutil.RandomContainerID()
  2051  	dir = filepath.Join(conf.RootDir, randomCID)
  2052  	if err := os.MkdirAll(dir, 0755); err != nil {
  2053  		t.Fatalf("os.MkdirAll(%q)=%v", dir, err)
  2054  	}
  2055  
  2056  	// Load the sandbox and check that the correct containers were returned.
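        	// Only the three containers that belong to the first sandbox should be
        	// returned; the unrelated sandboxes and the stray directories created above
        	// must be ignored.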
  2057  	id := wants[0].Sandbox.ID
  2058  	gots, err := LoadSandbox(conf.RootDir, id, LoadOpts{})
  2059  	if err != nil {
  2060  		t.Fatalf("loadSandbox()=%v", err)
  2061  	}
  2062  	wantIDs := make(map[string]struct{})
  2063  	for _, want := range wants {
  2064  		wantIDs[want.ID] = struct{}{}
  2065  	}
  2066  	for _, got := range gots {
  2067  		if got.Sandbox.ID != id {
  2068  			t.Errorf("wrong sandbox ID, got: %v, want: %v", got.Sandbox.ID, id)
  2069  		}
  2070  		if _, ok := wantIDs[got.ID]; !ok {
  2071  			t.Errorf("wrong container ID, got: %v, wants: %v", got.ID, wantIDs)
  2072  		}
  2073  		delete(wantIDs, got.ID)
  2074  	}
  2075  	if len(wantIDs) != 0 {
  2076  		t.Errorf("containers not found: %v", wantIDs)
  2077  	}
  2078  }
  2079  
  2080  // TestMultiContainerRunNonRoot checks that a child container can be configured
  2081  // when running as a non-privileged user.
  2082  func TestMultiContainerRunNonRoot(t *testing.T) {
  2083  	cmdSub := []string{"/bin/true"}
  2084  	podSpecs, ids := createSpecs(sleepCmd, cmdSub)
  2085  
  2086  	// The user running inside the container can't list '$TMP/blocked' and would
  2087  	// fail to mount it.
  2088  	blocked, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
  2089  	if err != nil {
  2090  		t.Fatalf("ioutil.TempDir() failed: %v", err)
  2091  	}
  2092  	if err := os.Chmod(blocked, 0700); err != nil {
  2093  		t.Fatalf("os.MkDir(%q) failed: %v", blocked, err)
  2094  	}
  2095  	dir := path.Join(blocked, "test")
  2096  	if err := os.Mkdir(dir, 0755); err != nil {
  2097  		t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
  2098  	}
  2099  
  2100  	src, err := ioutil.TempDir(testutil.TmpDir(), "src")
  2101  	if err != nil {
  2102  		t.Fatalf("ioutil.TempDir() failed: %v", err)
  2103  	}
  2104  
  2105  	// Set a random user/group with no access to "blocked" dir.
  2106  	podSpecs[1].Process.User.UID = 343
  2107  	podSpecs[1].Process.User.GID = 2401
  2108  	podSpecs[1].Process.Capabilities = nil
  2109  
  2110  	podSpecs[1].Mounts = append(podSpecs[1].Mounts, specs.Mount{
  2111  		Destination: dir,
  2112  		Source:      src,
  2113  		Type:        "bind",
  2114  	})
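        	// The mount target lives under the directory the container user cannot list,
        	// so the test passing implies runsc performs the mount with its own
        	// privileges rather than the container user's.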
  2115  
  2116  	rootDir, cleanup, err := testutil.SetupRootDir()
  2117  	if err != nil {
  2118  		t.Fatalf("error creating root dir: %v", err)
  2119  	}
  2120  	defer cleanup()
  2121  
  2122  	conf := testutil.TestConfig(t)
  2123  	conf.RootDir = rootDir
  2124  
  2125  	pod, cleanup, err := startContainers(conf, podSpecs, ids)
  2126  	if err != nil {
  2127  		t.Fatalf("error starting containers: %v", err)
  2128  	}
  2129  	defer cleanup()
  2130  
  2131  	// Once all containers are started, wait for the child container to exit.
  2132  	// This means that the volume was mounted properly.
  2133  	ws, err := pod[1].Wait()
  2134  	if err != nil {
  2135  		t.Fatalf("running child container: %v", err)
  2136  	}
  2137  	if !ws.Exited() || ws.ExitStatus() != 0 {
  2138  		t.Fatalf("child container failed, waitStatus: %v", ws)
  2139  	}
  2140  }
  2141  
  2142  // TestMultiContainerHomeEnvDir tests that the HOME environment variable is set
  2143  // for root containers, sub-containers, and exec'ed processes.
  2144  func TestMultiContainerHomeEnvDir(t *testing.T) {
  2145  	// NOTE: Don't use overlay since we need changes to persist to the temp dir
  2146  	// outside the sandbox.
  2147  	for testName, conf := range configs(t, true /* noOverlay */) {
  2148  		t.Run(testName, func(t *testing.T) {
  2149  
  2150  			rootDir, cleanup, err := testutil.SetupRootDir()
  2151  			if err != nil {
  2152  				t.Fatalf("error creating root dir: %v", err)
  2153  			}
  2154  			defer cleanup()
  2155  			conf.RootDir = rootDir
  2156  
  2157  			// Create temp files we can write the value of $HOME to.
  2158  			homeDirs := map[string]*os.File{}
  2159  			for _, name := range []string{"root", "sub", "exec"} {
  2160  				homeFile, err := ioutil.TempFile(testutil.TmpDir(), name)
  2161  				if err != nil {
  2162  					t.Fatalf("creating temp file: %v", err)
  2163  				}
  2164  				homeDirs[name] = homeFile
  2165  			}
  2166  
  2167  			// We will sleep in the root container in order to ensure that the root
  2168  			// container doesn't terminate before sub containers can be created.
  2169  			rootCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s; sleep 1000`, homeDirs["root"].Name())}
  2170  			subCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["sub"].Name())}
  2171  			execCmd := fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["exec"].Name())
  2172  
  2173  			// Set up the containers: a root container and a sub container.
  2174  			specConfig, ids := createSpecs(rootCmd, subCmd)
  2175  			containers, cleanup, err := startContainers(conf, specConfig, ids)
  2176  			if err != nil {
  2177  				t.Fatalf("error starting containers: %v", err)
  2178  			}
  2179  			defer cleanup()
  2180  
  2181  			// Exec into the root container synchronously.
  2182  			if _, err := execute(conf, containers[0], "/bin/sh", "-c", execCmd); err != nil {
  2183  				t.Errorf("error executing %+v: %v", execCmd, err)
  2184  			}
  2185  
  2186  			// Wait for the subcontainer to finish.
  2187  			_, err = containers[1].Wait()
  2188  			if err != nil {
  2189  				t.Errorf("wait on child container: %v", err)
  2190  			}
  2191  
  2192  			// Wait until the root container has written $HOME and is sleeping.
  2193  			expectedProc := newProcessBuilder().Cmd("sleep").Process()
  2194  			if err := waitForProcess(containers[0], expectedProc); err != nil {
  2195  				t.Errorf("failed to wait for sleep to start: %v", err)
  2196  			}
  2197  
  2198  			// Check the written files.
  2199  			for name, tmpFile := range homeDirs {
  2200  				dirBytes, err := ioutil.ReadAll(tmpFile)
  2201  				if err != nil {
  2202  					t.Fatalf("reading %s temp file: %v", name, err)
  2203  				}
  2204  				got := string(dirBytes)
  2205  
  2206  				want := "/"
  2207  				if got != want {
  2208  					t.Errorf("%s $HOME incorrect: got: %q, want: %q", name, got, want)
  2209  				}
  2210  			}
  2211  
  2212  		})
  2213  	}
  2214  }
  2215  
  2216  func TestMultiContainerEvent(t *testing.T) {
  2217  	tests := []string{"enableCgroups", "disableCgroups"}
  2218  	for _, name := range tests {
  2219  		conf := testutil.TestConfig(t)
  2220  		t.Run(name, func(t *testing.T) {
  2221  			rootDir, cleanup, err := testutil.SetupRootDir()
  2222  			if err != nil {
  2223  				t.Fatalf("error creating root dir: %v", err)
  2224  			}
  2225  			defer cleanup()
  2226  			conf.RootDir = rootDir
  2227  
  2228  			// Set up the containers.
  2229  			sleep := []string{"/bin/sh", "-c", "/bin/sleep 100 | grep 123"}
  2230  			busy := []string{"/bin/bash", "-c", "i=0 ; while true ; do (( i += 1 )) ; done"}
  2231  			quick := []string{"/bin/true"}
  2232  			podSpecs, ids := createSpecs(sleep, busy, quick)
  2233  			if name == "enableCgroups" {
  2234  				mnt := specs.Mount{
  2235  					Destination: "/sys/fs/cgroup",
  2236  					Type:        "cgroup",
  2237  					Options:     nil,
  2238  				}
  2239  				podSpecs[0].Mounts = append(podSpecs[0].Mounts, mnt)
  2240  				podSpecs[1].Mounts = append(podSpecs[1].Mounts, mnt)
  2241  				podSpecs[2].Mounts = append(podSpecs[2].Mounts, mnt)
  2242  			}
  2243  			containers, cleanup, err := startContainers(conf, podSpecs, ids)
  2244  			if err != nil {
  2245  				t.Fatalf("error starting containers: %v", err)
  2246  			}
  2247  			defer cleanup()
  2248  
  2249  			t.Logf("Running container sleep %s", containers[0].ID)
  2250  			t.Logf("Running container busy %s", containers[1].ID)
  2251  			t.Logf("Running container quick %s", containers[2].ID)
  2252  
  2253  			// Wait for containers to start (last container should complete).
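        			// The sleep container runs `sh -c "sleep | grep"`, so it should have three
        			// processes (sh, sleep, grep); the busy container runs a single bash loop.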
  2254  			if err := waitForProcessCount(containers[0], 3); err != nil {
  2255  				t.Errorf("failed to wait for sleep to start: %v", err)
  2256  			}
  2257  			if err := waitForProcessCount(containers[1], 1); err != nil {
  2258  				t.Errorf("failed to wait for bash to start: %v", err)
  2259  			}
  2260  			if ws, err := containers[2].Wait(); err != nil || ws != 0 {
  2261  				t.Fatalf("Container.Wait, status: %v, err: %v", ws, err)
  2262  			}
  2263  
  2264  			// Check events for running containers.
  2265  			for i, cont := range containers[:2] {
  2266  				ret, err := cont.Event()
  2267  				if err != nil {
  2268  					t.Errorf("Container.Event(%q): %v", cont.ID, err)
  2269  				}
  2270  				evt := ret.Event
  2271  				if want := "stats"; evt.Type != want {
  2272  					t.Errorf("Wrong event type, cid: %q, want: %s, got: %s", cont.ID, want, evt.Type)
  2273  				}
  2274  				if cont.ID != evt.ID {
  2275  					t.Errorf("Wrong container ID, want: %s, got: %s", cont.ID, evt.ID)
  2276  				}
  2277  
  2278  				// container[0] expects 3 processes, while container[1] expects 1.
  2279  				wantPids := 3
  2280  				if i == 1 {
  2281  					wantPids = 1
  2282  				}
  2283  				if got := evt.Data.Pids.Current; got != uint64(wantPids) {
  2284  					t.Errorf("Wrong number of PIDs, cid: %q, want: %d, got: %d", cont.ID, wantPids, got)
  2285  				}
  2286  
  2287  				switch i {
  2288  				case 0:
  2289  					if name != "enableCgroups" && evt.Data.Memory.Usage.Usage != uint64(0) {
  2290  						t.Errorf("root container should report 0 memory usage, got: %v", evt.Data.Memory.Usage.Usage)
  2291  					}
  2292  				case 1:
  2293  					if evt.Data.Memory.Usage.Usage == uint64(0) {
  2294  						t.Error("sub-container should report non-zero memory usage")
  2295  					}
  2296  				}
  2297  
  2298  				// The exited container should always have a usage of zero.
  2299  				if exited := ret.ContainerUsage[containers[2].ID]; exited != 0 {
  2300  					t.Errorf("Exited container should report 0 CPU usage, got: %d", exited)
  2301  				}
  2302  			}
  2303  
  2304  			// Check that CPU reported by busy container is higher than sleep.
  2305  			cb := func() error {
  2306  				sleepEvt, err := containers[0].Event()
  2307  				if err != nil {
  2308  					return &backoff.PermanentError{Err: err}
  2309  				}
  2310  				sleepUsage := sleepEvt.Event.Data.CPU.Usage.Total
  2311  
  2312  				busyEvt, err := containers[1].Event()
  2313  				if err != nil {
  2314  					return &backoff.PermanentError{Err: err}
  2315  				}
  2316  				busyUsage := busyEvt.Event.Data.CPU.Usage.Total
  2317  
  2318  				if busyUsage <= sleepUsage {
  2319  					t.Logf("Busy container usage lower than sleep (busy: %d, sleep: %d), retrying...", busyUsage, sleepUsage)
  2320  					return fmt.Errorf("busy container should have higher usage than sleep, busy: %d, sleep: %d", busyUsage, sleepUsage)
  2321  				}
  2322  				return nil
  2323  			}
  2324  			// Give time for busy container to run and use more CPU than sleep.
  2325  			if err := testutil.Poll(cb, 10*time.Second); err != nil {
  2326  				t.Fatal(err)
  2327  			}
  2328  
  2329  			// Check that stopped and destroyed containers return error.
  2330  			if err := containers[1].Destroy(); err != nil {
  2331  				t.Fatalf("container.Destroy: %v", err)
  2332  			}
  2333  			for _, cont := range containers[1:] {
  2334  				if _, err := cont.Event(); err == nil {
  2335  					t.Errorf("Container.Event() should have failed, cid: %q, state: %v", cont.ID, cont.Status)
  2336  				}
  2337  			}
  2338  		})
  2339  	}
  2340  }
  2341  
  2342  // Tests that duplicate environment variables in the spec are merged into one.
  2343  func TestDuplicateEnvVariable(t *testing.T) {
  2344  	conf := testutil.TestConfig(t)
  2345  
  2346  	rootDir, cleanup, err := testutil.SetupRootDir()
  2347  	if err != nil {
  2348  		t.Fatalf("error creating root dir: %v", err)
  2349  	}
  2350  	defer cleanup()
  2351  	conf.RootDir = rootDir
  2352  
  2353  	// Create files to dump `env` output.
  2354  	files := [3]*os.File{}
  2355  	for i := 0; i < len(files); i++ {
  2356  		var err error
  2357  		files[i], err = ioutil.TempFile(testutil.TmpDir(), "env-var-test")
  2358  		if err != nil {
  2359  			t.Fatalf("creating temp file: %v", err)
  2360  		}
  2361  		defer files[i].Close()
  2362  		defer os.Remove(files[i].Name())
  2363  	}
  2364  
  2365  	// Set up the containers. Use the root container to test exec too.
  2366  	cmd1 := fmt.Sprintf("env > %q; sleep 1000", files[0].Name())
  2367  	cmd2 := fmt.Sprintf("env > %q", files[1].Name())
  2368  	cmdExec := fmt.Sprintf("env > %q", files[2].Name())
  2369  	testSpecs, ids := createSpecs([]string{"/bin/sh", "-c", cmd1}, []string{"/bin/sh", "-c", cmd2})
  2370  	testSpecs[0].Process.Env = append(testSpecs[0].Process.Env, "VAR=foo", "VAR=bar")
  2371  	testSpecs[1].Process.Env = append(testSpecs[1].Process.Env, "VAR=foo", "VAR=bar")
  2372  
  2373  	containers, cleanup, err := startContainers(conf, testSpecs, ids)
  2374  	if err != nil {
  2375  		t.Fatalf("error starting containers: %v", err)
  2376  	}
  2377  	defer cleanup()
  2378  
  2379  	// Wait until after `env` has executed.
  2380  	expectedProc := newProcessBuilder().Cmd("sleep").Process()
  2381  	if err := waitForProcess(containers[0], expectedProc); err != nil {
  2382  		t.Errorf("failed to wait for sleep to start: %v", err)
  2383  	}
  2384  	if ws, err := containers[1].Wait(); err != nil {
  2385  		t.Errorf("failed to wait container 1: %v", err)
  2386  	} else if es := ws.ExitStatus(); es != 0 {
  2387  		t.Errorf("container %s exited with non-zero status: %v", containers[1].ID, es)
  2388  	}
  2389  
  2390  	execArgs := &control.ExecArgs{
  2391  		Filename: "/bin/sh",
  2392  		Argv:     []string{"/bin/sh", "-c", cmdExec},
  2393  		Envv:     []string{"VAR=foo", "VAR=bar"},
  2394  	}
  2395  	if ws, err := containers[0].executeSync(conf, execArgs); err != nil || ws.ExitStatus() != 0 {
  2396  		t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
  2397  	}
  2398  
  2399  	// Now read the files and check that no environment variable is repeated.
  2400  	for _, file := range files {
  2401  		out, err := ioutil.ReadAll(file)
  2402  		if err != nil {
  2403  			t.Fatal(err)
  2404  		}
  2405  		t.Logf("Checking env %q:\n%s", file.Name(), out)
  2406  		envs := make(map[string]string)
  2407  		for _, line := range strings.Split(string(out), "\n") {
  2408  			if len(line) == 0 {
  2409  				continue
  2410  			}
  2411  			envVar := strings.SplitN(line, "=", 2)
  2412  			if len(envVar) != 2 {
  2413  				t.Fatalf("invalid env variable: %s", line)
  2414  			}
  2415  			key := envVar[0]
  2416  			if val, ok := envs[key]; ok {
  2417  				t.Errorf("env variable %q is duplicated: %q and %q", key, val, envVar[1])
  2418  			}
  2419  			envs[key] = envVar[1]
  2420  		}
  2421  		if _, ok := envs["VAR"]; !ok {
  2422  			t.Errorf("variable VAR missing: %v", envs)
  2423  		}
  2424  	}
  2425  }
  2426  
  2427  // Test that /dev/shm can be shared between containers.
  2428  func TestMultiContainerShm(t *testing.T) {
  2429  	conf := testutil.TestConfig(t)
  2430  
  2431  	rootDir, cleanup, err := testutil.SetupRootDir()
  2432  	if err != nil {
  2433  		t.Fatalf("error creating root dir: %v", err)
  2434  	}
  2435  	defer cleanup()
  2436  	conf.RootDir = rootDir
  2437  
  2438  	testSpecs, ids := createSpecs(sleepCmd, sleepCmd)
  2439  
  2440  	sharedMount := specs.Mount{
  2441  		Destination: "/dev/shm",
  2442  		Source:      "/some/path",
  2443  		Type:        "tmpfs",
  2444  	}
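        	// The source path above does not need to exist on the host: for tmpfs
        	// mounts the source is just a label and is not used as a backing directory.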
  2445  
  2446  	// Add shared /dev/shm mount to all containers.
  2447  	for _, spec := range testSpecs {
  2448  		spec.Mounts = append(spec.Mounts, sharedMount)
  2449  	}
  2450  
  2451  	createSharedMount(sharedMount, "devshm", testSpecs...)
  2452  
  2453  	containers, cleanup, err := startContainers(conf, testSpecs, ids)
  2454  	if err != nil {
  2455  		t.Fatalf("error starting containers: %v", err)
  2456  	}
  2457  	defer cleanup()
  2458  
  2459  	// Write file to shared /dev/shm directory in one container.
  2460  	const output = "/dev/shm/file.txt"
  2461  	exec0 := fmt.Sprintf("echo 123 > %s", output)
  2462  	if ws, err := execute(conf, containers[0], "/bin/sh", "-c", exec0); err != nil || ws.ExitStatus() != 0 {
  2463  		t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
  2464  	}
  2465  
  2466  	// Check that file can be found in the other container.
  2467  	out, err := executeCombinedOutput(conf, containers[1], nil, "/bin/cat", output)
  2468  	if err != nil {
  2469  		t.Fatalf("exec failed: %v", err)
  2470  	}
  2471  	if want := "123\n"; string(out) != want {
  2472  		t.Fatalf("wrong output, want: %q, got: %v", want, out)
  2473  	}
  2474  }
  2475  
  2476  // Test that using a file-backed overlay does not lead to memory leaks or to
  2477  // leaks in the host file backing the overlay.
  2478  func TestMultiContainerOverlayLeaks(t *testing.T) {
  2479  	conf := testutil.TestConfig(t)
  2480  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
  2481  	if err != nil {
  2482  		t.Fatal("error finding test_app:", err)
  2483  	}
  2484  
  2485  	rootDir, cleanup, err := testutil.SetupRootDir()
  2486  	if err != nil {
  2487  		t.Fatalf("error creating root dir: %v", err)
  2488  	}
  2489  	defer cleanup()
  2490  	conf.RootDir = rootDir
  2491  
  2492  	// Configure root overlay backed by rootfs itself.
  2493  	conf.Overlay2.Set("root:self")
  2494  
  2495  	// Since all containers share the same conf.RootDir, and root filesystems
  2496  	// have overlay enabled, the root directory should never be modified. Hence,
  2497  	// creating files at the same locations should not lead to EEXIST error.
  2498  	createFsTree := []string{"/app", "fsTreeCreate", "--depth=10", "--file-per-level=10", "--file-size=4096"}
  2499  	testSpecs, ids := createSpecs(sleepCmd, createFsTree, createFsTree, createFsTree)
  2500  	for i, s := range testSpecs {
  2501  		if i == 0 {
  2502  			// Root container just sleeps, so should be fine.
  2503  			continue
  2504  		}
  2505  		// For subcontainers, make sure the root filesystem is writable because
  2506  		// the app will create files in it.
  2507  		s.Root.Readonly = false
  2508  		// createSpecs assigns the host's root as the container's root. But self
  2509  		// overlay2 medium creates the filestore file inside container's root. That
  2510  		// will fail. So create a temporary writable directory to represent the
  2511  		// container's root filesystem and copy the app binary there.
  2512  		contRoot, rootfsCU, err := testutil.SetupRootDir()
  2513  		if err != nil {
  2514  			t.Fatalf("error creating container's root filesystem: %v", err)
  2515  		}
  2516  		defer rootfsCU()
  2517  		s.Root.Path = contRoot
  2518  		appDst := path.Join(contRoot, "app")
  2519  		if err := copyFile(app, appDst); err != nil {
  2520  			t.Fatalf("error copying app binary from %q to %q: %v", app, appDst, err)
  2521  		}
  2522  	}
  2523  
  2524  	// Start the containers.
  2525  	conts, cleanup, err := startContainers(conf, testSpecs, ids)
  2526  	if err != nil {
  2527  		t.Fatalf("error starting containers: %v", err)
  2528  	}
  2529  	defer cleanup()
  2530  
  2531  	sandboxID := conts[0].Sandbox.ID
  2532  	for i, c := range conts {
  2533  		if i == 0 {
  2534  			// Don't wait for the root container which just sleeps.
  2535  			continue
  2536  		}
  2537  		// Wait for the sub-container to stop.
  2538  		if ws, err := c.Wait(); err != nil {
  2539  			t.Errorf("failed to wait for subcontainer number %d: %v", i, err)
  2540  		} else if es := ws.ExitStatus(); es != 0 {
  2541  			t.Errorf("subcontainer number %d exited with non-zero status %d", i, es)
  2542  		}
  2543  	}
  2544  
  2545  	// Give the reclaimer goroutine some time to reclaim.
  2546  	time.Sleep(3 * time.Second)
  2547  
  2548  	for i, s := range testSpecs {
  2549  		if i == 0 {
  2550  			continue
  2551  		}
  2552  
  2553  		// Stat filestoreFile to see its usage. It should have been cleaned up.
  2554  		filestoreFile := boot.SelfFilestorePath(s.Root.Path, sandboxID)
  2555  		var stat unix.Stat_t
  2556  		if err := unix.Stat(filestoreFile, &stat); err != nil {
  2557  			t.Errorf("unix.Stat(%q) failed for rootfs filestore: %v", filestoreFile, err)
  2558  			continue
  2559  		}
  2560  		if stat.Blocks > 0 {
  2561  			t.Errorf("rootfs filestore file %q for sub container %d is not cleaned up, has %d blocks", filestoreFile, i, stat.Blocks)
  2562  		}
  2563  	}
  2564  }
  2565  
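        // copyFile copies src into dst, creating dst with 0755 permissions.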
  2566  func copyFile(src, dst string) error {
  2567  	bytesRead, err := ioutil.ReadFile(src)
  2568  	if err != nil {
  2569  		return err
  2570  	}
  2571  
  2572  	return ioutil.WriteFile(dst, bytesRead, 0755)
  2573  }
  2574  
  2575  // Test that spawning many subcontainers that do a lot of filesystem operations
  2576  // does not lead to memory leaks.
  2577  func TestMultiContainerMemoryLeakStress(t *testing.T) {
  2578  	conf := testutil.TestConfig(t)
  2579  	app, err := testutil.FindFile("test/cmd/test_app/test_app")
  2580  	if err != nil {
  2581  		t.Fatal("error finding test_app:", err)
  2582  	}
  2583  
  2584  	rootDir, cleanup, err := testutil.SetupRootDir()
  2585  	if err != nil {
  2586  		t.Fatalf("error creating root dir: %v", err)
  2587  	}
  2588  	defer cleanup()
  2589  	conf.RootDir = rootDir
  2590  
  2591  	// Configure root overlay (backed by memory) so that containers can create
  2592  	// files in the root directory.
  2593  	conf.Overlay2.Set("root:memory")
  2594  
  2595  	// Subcontainers will do a lot of filesystem work. Create a lot of them.
  2596  	createFsTree := []string{app, "fsTreeCreate", "--depth=10", "--file-per-level=10", "--file-size=1048576"}
  2597  	const (
  2598  		warmupContainers                   = 5
  2599  		stressContainers                   = 25
  2600  		nominalReclaimDurationPerContainer = time.Second
  2601  		maxReclaimDurationPerContainer     = 5 * time.Second
  2602  	)
  2603  	cmds := make([][]string, 0, warmupContainers+stressContainers+1)
  2604  	cmds = append(cmds, sleepCmd)
  2605  	for i := 0; i < warmupContainers+stressContainers; i++ {
  2606  		cmds = append(cmds, createFsTree)
  2607  	}
  2608  	testSpecs, ids := createSpecs(cmds...)
  2609  	// Make sure none of the root filesystems are read-only, otherwise we won't
  2610  	// be able to create the files.
  2611  	for _, s := range testSpecs {
  2612  		s.Root.Readonly = false
  2613  	}
  2614  
  2615  	// Start the root container.
  2616  	rootCont, cleanup, err := startContainers(conf, testSpecs[:1], ids[:1])
  2617  	if err != nil {
  2618  		t.Fatalf("error starting containers: %v", err)
  2619  	}
  2620  	defer cleanup()
  2621  
  2622  	// Warm up the sandbox.
  2623  	warmUpContainers, cleanUp2, err := startContainers(conf, testSpecs[1:1+warmupContainers], ids[1:1+warmupContainers])
  2624  	if err != nil {
  2625  		t.Fatalf("error starting containers: %v", err)
  2626  	}
  2627  	defer cleanUp2()
  2628  	// Wait for all warm up subcontainers to stop.
  2629  	for i, c := range warmUpContainers {
  2630  		// Wait for the sub-container to stop.
  2631  		if ws, err := c.Wait(); err != nil {
  2632  			t.Errorf("failed to wait for warm up subcontainer number %d: %v", i, err)
  2633  		} else if es := ws.ExitStatus(); es != 0 {
  2634  			t.Errorf("warm up subcontainer number %d exited with non-zero status %d", i, es)
  2635  		}
  2636  	}
  2637  
  2638  	// Give the reclaimer goroutine some time to reclaim.
  2639  	time.Sleep(warmupContainers * nominalReclaimDurationPerContainer)
  2640  
  2641  	// Measure the memory usage after the warm up.
  2642  	// It's possible, though unlikely, that reclaiming is unfinished; this is
  2643  	// harmless because we tolerate newUsage being lower than oldUsage below.
  2644  	oldUsage, err := rootCont[0].Sandbox.Usage(true /* Full */)
  2645  	if err != nil {
  2646  		t.Fatalf("sandbox.Usage failed: %v", err)
  2647  	}
  2648  
  2649  	// Hammer the sandbox with sub containers.
  2650  	subConts, cleanup3, err := startContainers(conf, testSpecs[1+warmupContainers:1+warmupContainers+stressContainers], ids[1+warmupContainers:1+warmupContainers+stressContainers])
  2651  	if err != nil {
  2652  		t.Fatalf("error starting containers: %v", err)
  2653  	}
  2654  	defer cleanup3()
  2655  	// Wait for all subcontainers to stop.
  2656  	for i, c := range subConts {
  2657  		if ws, err := c.Wait(); err != nil {
  2658  			t.Errorf("failed to wait for subcontainer number %d: %v", i, err)
  2659  		} else if es := ws.ExitStatus(); es != 0 {
  2660  			t.Errorf("subcontainer number %d exited with non-zero status %d", i, es)
  2661  		}
  2662  	}
  2663  
  2664  	// Sample memory usage until all fields are no more than 5% greater than
  2665  	// after warmup.
  2666  	deadline := time.Now().Add(stressContainers * maxReclaimDurationPerContainer)
  2667  	oldUsageV := reflect.ValueOf(oldUsage)
  2668  	for {
  2669  		newUsage, err := rootCont[0].Sandbox.Usage(true /* Full */)
  2670  		if err != nil {
  2671  			t.Fatalf("sandbox.Usage failed: %v", err)
  2672  		}
  2673  		allFieldsOk := true
  2674  		// Note that all fields of control.MemoryUsage are exported and uint64.
  2675  		newUsageV := reflect.ValueOf(newUsage)
  2676  		numFields := oldUsageV.NumField()
  2677  		for i := 0; i < numFields; i++ {
  2678  			name := oldUsageV.Type().Field(i).Name
  2679  			oldVal := oldUsageV.Field(i).Interface().(uint64)
  2680  			newVal := newUsageV.Field(i).Interface().(uint64)
  2681  			if newVal <= oldVal {
  2682  				continue
  2683  			}
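        			// Growth from zero counts as exceeding the threshold (this also avoids
        			// dividing by zero below).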
  2684  			if oldVal == 0 || ((newVal-oldVal)*100)/oldVal > 5 {
  2685  				t.Logf("%s usage increased by more than 5%%: old=%d, new=%d", name, oldVal, newVal)
  2686  				allFieldsOk = false
  2687  			}
  2688  		}
  2689  		if allFieldsOk {
  2690  			break
  2691  		}
  2692  		if time.Now().After(deadline) {
  2693  			t.Fatalf("Memory usage after stress containers exited did not converge to memory usage after warmup")
  2694  		}
  2695  		time.Sleep(time.Second)
  2696  	}
  2697  }
  2698  
  2699  // TestMultiContainerCheckpointRestore tests that checkpoint/restore works
  2700  // with multiple containers.
  2701  func TestMultiContainerCheckpointRestore(t *testing.T) {
  2702  	// Skip overlay because test requires writing to host file.
  2703  	for name, conf := range configs(t, true /* noOverlay */) {
  2704  		t.Run(name, func(t *testing.T) {
  2705  			rootDir, cleanup, err := testutil.SetupRootDir()
  2706  			if err != nil {
  2707  				t.Fatalf("error creating root dir: %v", err)
  2708  			}
  2709  			defer cleanup()
  2710  			conf.RootDir = rootDir
  2711  
  2712  			dir, err := os.MkdirTemp(testutil.TmpDir(), "checkpoint-test")
  2713  			if err != nil {
  2714  				t.Fatalf("os.MkdirTemp() failed: %v", err)
  2715  			}
  2716  			defer os.RemoveAll(dir)
  2717  			if err := os.Chmod(dir, 0777); err != nil {
  2718  				t.Fatalf("error chmoding file: %q, %v", dir, err)
  2719  			}
  2720  
  2721  			outputPath := filepath.Join(dir, "output")
  2722  			outputFile, err := createWriteableOutputFile(outputPath)
  2723  			if err != nil {
  2724  				t.Fatalf("error creating output file: %v", err)
  2725  			}
  2726  			defer outputFile.Close()
  2727  
  2728  			// Create 3 containers. First requires a restore call, second requires a restoreSubcontainer
  2729  			// that needs to wait, third issues a restoreSubcontainer call that actually restores the
  2730  			// entire sandbox.
  2731  			script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath)
  2732  			testSpecs, ids := createSpecs(
  2733  				sleepCmd,
  2734  				[]string{"bash", "-c", script},
  2735  				sleepCmd,
  2736  			)
  2737  
  2738  			conts, cleanup, err := startContainers(conf, testSpecs, ids)
  2739  			if err != nil {
  2740  				t.Fatalf("error starting containers: %v", err)
  2741  			}
  2742  			defer cleanup()
  2743  
  2744  			// Wait until the application has run.
  2745  			if err := waitForFileNotEmpty(outputFile); err != nil {
  2746  				t.Fatalf("Failed to wait for output file: %v", err)
  2747  			}
  2748  
  2749  			// Checkpoint root container; save state into new file.
  2750  			if err := conts[0].Checkpoint(dir, false /* direct */, statefile.Options{Compression: statefile.CompressionLevelFlateBestSpeed}, pgalloc.SaveOpts{}); err != nil {
  2751  				t.Fatalf("error checkpointing container to empty file: %v", err)
  2752  			}
  2753  			defer os.RemoveAll(dir)
  2754  
  2755  			lastNum, err := readOutputNum(outputPath, -1)
  2756  			if err != nil {
  2757  				t.Fatalf("error with outputFile: %v", err)
  2758  			}
  2759  
  2760  			// Delete and recreate file before restoring.
  2761  			if err := os.Remove(outputPath); err != nil {
  2762  				t.Fatalf("error removing file")
  2763  			}
  2764  			outputFile2, err := createWriteableOutputFile(outputPath)
  2765  			if err != nil {
  2766  				t.Fatalf("error creating output file: %v", err)
  2767  			}
  2768  			defer outputFile2.Close()
  2769  
  2770  			// Restore into new containers with different IDs (e.g. a clone). Keep the
  2771  			// initial containers running to ensure there is no conflict with them.
  2772  			newIds := make([]string, 0, len(ids))
  2773  			for range ids {
  2774  				newIds = append(newIds, testutil.RandomContainerID())
  2775  			}
  2776  			for _, specs := range testSpecs[1:] {
  2777  				specs.Annotations[specutils.ContainerdSandboxIDAnnotation] = newIds[0]
  2778  			}
  2779  			conts2, cleanup2, err := restoreContainers(conf, testSpecs, newIds, dir)
  2780  			if err != nil {
  2781  				t.Fatalf("error restoring containers: %v", err)
  2782  			}
  2783  			defer cleanup2()
  2784  
  2785  			// Wait until the application has run.
  2786  			if err := waitForFileNotEmpty(outputFile2); err != nil {
  2787  				t.Fatalf("Failed to wait for output file: %v", err)
  2788  			}
  2789  
  2790  			firstNum, err := readOutputNum(outputPath, 0)
  2791  			if err != nil {
  2792  				t.Fatalf("error with outputFile: %v", err)
  2793  			}
  2794  
  2795  			// Check that lastNum is one less than firstNum and that the container
  2796  			// picks up from where it left off.
  2797  			if lastNum+1 != firstNum {
  2798  				t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
  2799  			}
  2800  
  2801  			for _, cont := range conts2 {
  2802  				state := cont.State()
  2803  				if state.Status != Running {
  2804  					t.Fatalf("container %v is not running: %v", cont.ID, state.Status)
  2805  				}
  2806  			}
  2807  
  2808  			// Restore again using the same new IDs. This requires the previously
  2809  			// restored containers to cease to exist first, because they share the
  2810  			// same identity.
  2811  			cleanup2()
  2812  			conts2 = nil
  2813  
  2814  			// Delete and recreate file before restoring.
  2815  			if err := os.Remove(outputPath); err != nil {
  2816  				t.Fatalf("error removing file")
  2817  			}
  2818  			outputFile3, err := createWriteableOutputFile(outputPath)
  2819  			if err != nil {
  2820  				t.Fatalf("error creating output file: %v", err)
  2821  			}
  2822  			defer outputFile3.Close()
  2823  
  2824  			_, cleanup3, err := restoreContainers(conf, testSpecs, newIds, dir)
  2825  			if err != nil {
  2826  				t.Fatalf("error creating containers: %v", err)
  2827  			}
  2828  			defer cleanup3()
  2829  
  2830  			// Wait until the application has run.
  2831  			if err := waitForFileNotEmpty(outputFile3); err != nil {
  2832  				t.Fatalf("Failed to wait for output file: %v", err)
  2833  			}
  2834  
  2835  			firstNum2, err := readOutputNum(outputPath, 0)
  2836  			if err != nil {
  2837  				t.Fatalf("error with outputFile: %v", err)
  2838  			}
  2839  
  2840  			// Check that lastNum is one less than firstNum and that the container
  2841  			// picks up from where it left off.
  2842  			if lastNum+1 != firstNum2 {
  2843  				t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
  2844  			}
  2845  		})
  2846  	}
  2847  }
  2848  
  2849  // Tests that cgroups are mounted only in containers whose spec has a cgroup
  2850  // mount.
  2851  func TestMultiContainerCgroups(t *testing.T) {
  2852  	_, err := testutil.FindFile("test/cmd/test_app/test_app")
  2853  	if err != nil {
  2854  		t.Fatal("error finding test_app:", err)
  2855  	}
  2856  
  2857  	for name, conf := range configs(t, false /* noOverlay */) {
  2858  		t.Run(name, func(t *testing.T) {
  2859  			rootDir, cleanup, err := testutil.SetupRootDir()
  2860  			if err != nil {
  2861  				t.Fatalf("error creating root dir: %v", err)
  2862  			}
  2863  			defer cleanup()
  2864  			conf.RootDir = rootDir
  2865  
  2866  			podSpecs, ids := createSpecs(sleepCmd, sleepCmd)
  2867  			podSpecs[1].Linux = &specs.Linux{
  2868  				Namespaces: []specs.LinuxNamespace{{Type: "pid"}},
  2869  			}
  2870  
  2871  			mnt0 := specs.Mount{
  2872  				Destination: "/sys/fs/cgroup",
  2873  				Type:        "cgroup",
  2874  				Options:     nil,
  2875  			}
  2876  			// Append cgroups mount for only one container.
  2877  			podSpecs[0].Mounts = append(podSpecs[0].Mounts, mnt0)
  2878  
  2879  			createSharedMount(mnt0, "test-mount", podSpecs...)
  2880  			containers, cleanup, err := startContainers(conf, podSpecs, ids)
  2881  			if err != nil {
  2882  				t.Fatalf("error starting containers: %v", err)
  2883  			}
  2884  			defer cleanup()
  2885  
  2886  			ctrlFileMap := map[string]string{
  2887  				"cpu":     "cpu.shares",
  2888  				"cpuacct": "cpuacct.usage",
  2889  				"cpuset":  "cpuset.cpus",
  2890  				"devices": "devices.allow",
  2891  				"memory":  "memory.usage_in_bytes",
  2892  				"pids":    "pids.current",
  2893  			}
  2894  			for ctrl, f := range ctrlFileMap {
  2895  				ctrlRoot := control.CgroupControlFile{
  2896  					Controller: ctrl,
  2897  					Path:       "/",
  2898  					Name:       f,
  2899  				}
  2900  				ctrl0 := control.CgroupControlFile{
  2901  					Controller: ctrl,
  2902  					Path:       "/" + containers[0].ID,
  2903  					Name:       f,
  2904  				}
  2905  				ctrl1 := control.CgroupControlFile{
  2906  					Controller: ctrl,
  2907  					Path:       "/" + containers[1].ID,
  2908  					Name:       f,
  2909  				}
  2910  
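        				// Only container0 requested a cgroup mount, so reads of the root
        				// hierarchy and of container0's cgroup must succeed, while reads of
        				// container1's cgroup must fail.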
  2911  				if _, err := containers[0].Sandbox.CgroupsReadControlFile(ctrlRoot); err != nil {
  2912  					t.Fatalf("root cgroup for controller %s not found: %v", ctrl, err)
  2913  				}
  2914  				if _, err := containers[0].Sandbox.CgroupsReadControlFile(ctrl0); err != nil {
  2915  					t.Fatalf("%s cgroup not mounted in container0: %v", ctrl, err)
  2916  				}
  2917  				if _, err := containers[1].Sandbox.CgroupsReadControlFile(ctrl1); err == nil {
  2918  					t.Fatalf("%s cgroup is mounted in container1 even though its spec has no cgroup mount", ctrl)
  2919  				}
  2920  			}
  2921  		})
  2922  	}
  2923  }
  2924  
  2925  // Tests that cgroups are mounted in the containers when the spec has a cgroup
  2926  // mount. Also checks that cgroup memory usage stats are updated correctly when
  2927  // one of the containers is killed.
  2928  func TestMultiContainerCgroupsMemoryUsage(t *testing.T) {
  2929  	_, err := testutil.FindFile("test/cmd/test_app/test_app")
  2930  	if err != nil {
  2931  		t.Fatal("error finding test_app:", err)
  2932  	}
  2933  
  2934  	for name, conf := range configs(t, false /* noOverlay */) {
  2935  		t.Run(name, func(t *testing.T) {
  2936  			rootDir, cleanup, err := testutil.SetupRootDir()
  2937  			if err != nil {
  2938  				t.Fatalf("error creating root dir: %v", err)
  2939  			}
  2940  			defer cleanup()
  2941  			conf.RootDir = rootDir
  2942  
  2943  			podSpecs, ids := createSpecs(sleepCmd, sleepCmd)
  2944  			podSpecs[1].Linux = &specs.Linux{
  2945  				Namespaces: []specs.LinuxNamespace{{Type: "pid"}},
  2946  			}
  2947  
  2948  			mnt0 := specs.Mount{
  2949  				Destination: "/sys/fs/cgroup",
  2950  				Type:        "cgroup",
  2951  				Options:     nil,
  2952  			}
  2953  			// Append cgroups mount for both containers.
  2954  			podSpecs[0].Mounts = append(podSpecs[0].Mounts, mnt0)
  2955  			podSpecs[1].Mounts = append(podSpecs[1].Mounts, mnt0)
  2956  
  2957  			createSharedMount(mnt0, "test-mount", podSpecs...)
  2958  			containers, cleanup, err := startContainers(conf, podSpecs, ids)
  2959  			if err != nil {
  2960  				t.Fatalf("error starting containers: %v", err)
  2961  			}
  2962  			defer cleanup()
  2963  
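        			// Control files for reading memory usage of the root hierarchy and of
        			// each container's cgroup.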
  2964  			ctrlRoot := control.CgroupControlFile{
  2965  				Controller: "memory",
  2966  				Path:       "/",
  2967  				Name:       "memory.usage_in_bytes",
  2968  			}
  2969  			ctrl0 := control.CgroupControlFile{
  2970  				Controller: "memory",
  2971  				Path:       "/" + containers[0].ID,
  2972  				Name:       "memory.usage_in_bytes",
  2973  			}
  2974  			ctrl1 := control.CgroupControlFile{
  2975  				Controller: "memory",
  2976  				Path:       "/" + containers[1].ID,
  2977  				Name:       "memory.usage_in_bytes",
  2978  			}
  2979  
  2980  			usageTotal, err := containers[0].Sandbox.CgroupsReadControlFile(ctrlRoot)
  2981  			if err != nil {
  2982  				t.Fatalf("error getting total usage %v", err)
  2983  			}
  2984  			usage0, err := containers[0].Sandbox.CgroupsReadControlFile(ctrl0)
  2985  			if err != nil {
  2986  				t.Fatalf("error getting container0 usage %v", err)
  2987  			}
  2988  			usage1, err := containers[1].Sandbox.CgroupsReadControlFile(ctrl1)
  2989  			if err != nil {
  2990  				t.Fatalf("error getting container1 usage %v", err)
  2991  			}
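        			// The root hierarchy accounts for memory used by all containers in the
        			// sandbox, so total usage should be at least the sum of the per-container
        			// usages.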
  2992  			if usageTotal < (usage0 + usage1) {
  2993  				t.Fatalf("total usage %v is less than the sum of container0 usage %v and container1 usage %v", usageTotal, usage0, usage1)
  2994  			}
  2995  
  2996  			// Kill the second container and check that usage has decreased.
  2997  			if err := containers[1].SignalContainer(unix.SIGKILL, true); err != nil {
  2998  				t.Fatalf("error killing container %q: %v", containers[1].ID, err)
  2999  			}
  3000  			if _, err := containers[1].Wait(); err != nil {
  3001  				t.Fatalf("error waiting for container %q: %v", containers[1].ID, err)
  3002  			}
  3003  
  3004  			newUsageTotal, err := containers[0].Sandbox.CgroupsReadControlFile(ctrlRoot)
  3005  			if err != nil {
  3006  				t.Fatalf("error getting total usage %v", err)
  3007  			}
  3008  			if newUsageTotal >= usageTotal {
  3009  				t.Fatalf("error new total usage %v is not less than old total usage %v", newUsageTotal, usageTotal)
  3010  			}
  3011  		})
  3012  	}
  3013  }