gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/runsc/container/console_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package container

import (
	"bytes"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/kr/pty"
	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/sentry/control"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/test/testutil"
	"gvisor.dev/gvisor/pkg/unet"
)

// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
	num := rand.Intn(10000)
	path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
	const maxPathLen = 108
	if len(path) <= maxPathLen {
		return path, nil
	}

	// Path is too long, try to make it shorter.
	cwd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("error getting cwd: %v", err)
	}
	path, err = filepath.Rel(cwd, path)
	if err != nil {
		return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
	}
	if len(path) > maxPathLen {
		return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
	}
	return path, nil
}

// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
// The returned cleanup function should be deferred by the caller.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
	t.Helper()
	srv, err := unet.BindAndListen(path, false)
	if err != nil {
		t.Fatalf("error binding and listening to socket %q: %v", path, err)
	}

	cleanup := func() {
		// Log errors; nothing can be done.
		if err := srv.Close(); err != nil {
			t.Logf("error closing socket %q: %v", path, err)
		}
		if err := os.Remove(path); err != nil {
			t.Logf("error removing socket %q: %v", path, err)
		}
	}

	return srv, cleanup
}

// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails unless exactly one FD is received and that FD is a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
	sock, err := srv.Accept()
	if err != nil {
		return nil, fmt.Errorf("error accepting socket connection: %v", err)
	}

	// Allow 1 fd to be received; that is all we expect.
	r := sock.Reader(true /* blocking */)
	r.EnableFDs(1)

	// The socket is closed right after sending the FD, so EOF is
	// an allowed error.
	b := [][]byte{{}}
	if _, err := r.ReadVec(b); err != nil && err != io.EOF {
		return nil, fmt.Errorf("error reading from socket connection: %v", err)
	}

	// We should have gotten a control message.
	fds, err := r.ExtractFDs()
	if err != nil {
		return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
	}
	if len(fds) != 1 {
		return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
	}

	// Verify that the fd is a terminal.
	if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
		return nil, fmt.Errorf("fd is not a terminal (ioctl TCGETS got %v)", err)
	}

	return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}
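
// sendConsolePTY is an illustrative, hypothetical counterpart to
// receiveConsolePTY above: a minimal sketch of how a PTY master could be
// handed over a console socket as an SCM_RIGHTS control message. It is not
// used by the tests in this file; the sandbox performs the real handoff
// through its own internals, so this is only an assumption-level sketch of
// the sending side of the protocol the helpers above consume.
func sendConsolePTY(path string) error {
	// Create the PTY pair; the replica would become the container's stdio.
	ptyMaster, ptyReplica, err := pty.Open()
	if err != nil {
		return fmt.Errorf("error opening pty: %v", err)
	}
	defer ptyMaster.Close()
	defer ptyReplica.Close()

	// Connect to the console socket created by createConsoleSocket.
	fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		return fmt.Errorf("error creating socket: %v", err)
	}
	defer unix.Close(fd)
	if err := unix.Connect(fd, &unix.SockaddrUnix{Name: path}); err != nil {
		return fmt.Errorf("error connecting to %q: %v", path, err)
	}

	// Send the master FD as an SCM_RIGHTS control message alongside a
	// small data payload, which is what receiveConsolePTY extracts.
	rights := unix.UnixRights(int(ptyMaster.Fd()))
	return unix.Sendmsg(fd, []byte("pty"), rights, nil, 0)
}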
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
	for name, conf := range configs(t, false /* noOverlay */) {
		t.Run(name, func(t *testing.T) {
			spec := testutil.NewSpecWithArgs("true")
			spec.Process.Terminal = true
			_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the container and pass the socket name.
			args := Args{
				ID:            testutil.RandomContainerID(),
				Spec:          spec,
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			c, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer c.Destroy()

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}
// Test that a pty FD is sent over the console socket for a sub-container if
// one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
	for name, conf := range configs(t, false /* noOverlay */) {
		t.Run(name, func(t *testing.T) {
			rootDir, cleanup, err := testutil.SetupRootDir()
			if err != nil {
				t.Fatalf("error creating root dir: %v", err)
			}
			defer cleanup()
			conf.RootDir = rootDir

			// Setup the containers.
			sleep := []string{"sleep", "100"}
			tru := []string{"true"}
			testSpecs, ids := createSpecs(sleep, tru)
			testSpecs[1].Process.Terminal = true

			bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			args := Args{
				ID:        ids[0],
				Spec:      testSpecs[0],
				BundleDir: bundleDir,
			}
			rootCont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer rootCont.Destroy()
			if err := rootCont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[1])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the sub-container and pass the socket name.
			args = Args{
				ID:            ids[1],
				Spec:          testSpecs[1],
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			cont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer cont.Destroy()
			if err := cont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
	conf := testutil.TestConfig(t)

	_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer cleanup()

	// Create and start the container.
	args := Args{
		ID:        testutil.RandomContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Create a pty master/replica. The replica will be passed to the exec
	// process.
	ptyMaster, ptyReplica, err := pty.Open()
	if err != nil {
		t.Fatalf("error opening pty: %v", err)
	}
	defer ptyMaster.Close()
	defer ptyReplica.Close()

	// Exec bash and attach a terminal. Note that occasionally /bin/sh
	// may be a different shell or have a different configuration (such
	// as disabling interactive mode and job control). Since we want to
	// explicitly test interactive mode, use /bin/bash. See b/116981926.
	execArgs := &control.ExecArgs{
		Filename: "/bin/bash",
		// Don't let bash execute from profile or rc files, otherwise
		// our PID counts get messed up.
		Argv: []string{"/bin/bash", "--noprofile", "--norc"},
		// Pass the pty replica as FD 0, 1, and 2.
		FilePayload: control.NewFilePayload(map[int]*os.File{
			0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
		}, nil),
		StdioIsPty: true,
	}

	pid, err := c.Execute(conf, execArgs)
	if err != nil {
		t.Fatalf("error executing: %v", err)
	}
	if pid != 2 {
		t.Fatalf("exec got pid %d, wanted %d", pid, 2)
	}

	// Make sure all the processes are running.
	expectedPL := []*control.Process{
		// Root container process.
		newProcessBuilder().Cmd("sleep").Process(),
		// Bash from exec process.
		newProcessBuilder().PID(2).Cmd("bash").Process(),
	}
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Execute sleep.
	if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
		t.Fatalf("ptyMaster.Write: %v", err)
	}

	// Wait for it to start. Sleep's PPID is bash's PID.
	expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Send a SIGTERM to the foreground process for the exec PID. Note that
	// although we pass in the PID of "bash", it should actually terminate
	// "sleep", since that is the foreground process.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Sleep process should be gone.
	expectedPL = expectedPL[:len(expectedPL)-1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Sleep is dead, but it may take more time for bash to notice and
	// change the foreground process back to itself. We know it is done
	// when bash writes "Terminated" to the pty.
	if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
		t.Fatalf("bash did not take over pty: %v", err)
	}

	// Send a SIGKILL to the foreground process again. This time "bash"
	// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
	// because bash ignores those.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}
	expectedPL = expectedPL[:1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Make sure the process indicates it was killed by a SIGKILL.
	ws, err := c.WaitPID(pid)
	if err != nil {
		t.Errorf("waiting on container failed: %v", err)
	}
	if !ws.Signaled() {
		t.Error("ws.Signaled() got false, want true")
	}
	if got, want := ws.Signal(), unix.SIGKILL; got != want {
		t.Errorf("ws.Signal() got %v, want %v", got, want)
	}
}
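
// signalForeground is an illustrative, host-side analogue (not used by the
// tests above) of SignalProcess with fgProcess=true: it looks up the
// terminal's foreground process group via the standard TIOCGPGRP ioctl and
// signals that whole group. Inside the sandbox the sentry performs the
// equivalent lookup itself, so this sketch only demonstrates the job-control
// concept the tests exercise; the helper name and signature are assumptions.
func signalForeground(ptyMaster *os.File, sig unix.Signal) error {
	pgid, err := unix.IoctlGetInt(int(ptyMaster.Fd()), unix.TIOCGPGRP)
	if err != nil {
		return fmt.Errorf("error getting foreground process group: %v", err)
	}
	// A negative PID targets every process in the process group.
	return unix.Kill(-pgid, sig)
}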
// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
	conf := testutil.TestConfig(t)
	// Don't let bash execute from profile or rc files, otherwise our PID
	// counts get messed up.
	spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
	spec.Process.Terminal = true

	_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer cleanup()

	sock, err := socketPath(bundleDir)
	if err != nil {
		t.Fatalf("error getting socket path: %v", err)
	}
	srv, cleanup := createConsoleSocket(t, sock)
	defer cleanup()

	// Create the container and pass the socket name.
	args := Args{
		ID:            testutil.RandomContainerID(),
		Spec:          spec,
		BundleDir:     bundleDir,
		ConsoleSocket: sock,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()

	// Get the PTY master.
	ptyMaster, err := receiveConsolePTY(srv)
	if err != nil {
		t.Fatalf("error receiving console FD: %v", err)
	}
	defer ptyMaster.Close()

	// Bash output as well as sandbox output will be written to the PTY
	// file. Writes after a certain point will block unless we drain the
	// PTY, so we must continually copy from it.
	//
	// We log the output to stderr for debuggability, and also to a buffer,
	// since we wait on particular output from bash below. We use a custom
	// blockingBuffer which is thread-safe and also blocks on Read calls,
	// which makes this a suitable Reader for WaitUntilRead.
	ptyBuf := newBlockingBuffer()
	tee := io.TeeReader(ptyMaster, ptyBuf)
	go func() {
		_, _ = io.Copy(os.Stderr, tee)
	}()

	// Start the container.
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Start waiting for the container to exit in a goroutine. We do this
	// very early, otherwise it might exit before we have a chance to call
	// Wait.
	var (
		ws unix.WaitStatus
		wg sync.WaitGroup
	)
	wg.Add(1)
	go func() {
		var err error
		ws, err = c.Wait()
		if err != nil {
			t.Errorf("error waiting on container: %v", err)
		}
		wg.Done()
	}()

	// Wait for bash to start.
	expectedPL := []*control.Process{
		newProcessBuilder().PID(1).Cmd("bash").Process(),
	}
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Fatalf("error waiting for processes: %v", err)
	}

	// Execute sleep via the terminal.
	if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
		t.Fatalf("ptyMaster.Write(): %v", err)
	}

	// Wait for sleep to start.
	expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Fatalf("error waiting for processes: %v", err)
	}

	// Reset the pty buffer, so there is less output for us to scan later.
	ptyBuf.Reset()

	// Send a SIGTERM to the foreground process. We pass PID=0, indicating
	// that the root process should be killed. However, by setting
	// fgProcess=true, the signal should actually be sent to sleep.
	if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Sleep process should be gone.
	expectedPL = expectedPL[:len(expectedPL)-1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Sleep is dead, but it may take more time for bash to notice and
	// change the foreground process back to itself. We know it is done
	// when bash writes "Terminated" to the pty.
	if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
		t.Fatalf("bash did not take over pty: %v", err)
	}

	// Send a SIGKILL to the foreground process again. This time "bash"
	// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
	// because bash ignores those.
	if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Wait for the sandbox to exit. It should exit with a SIGKILL status.
	wg.Wait()
	if !ws.Signaled() {
		t.Error("ws.Signaled() got false, want true")
	}
	if got, want := ws.Signal(), unix.SIGKILL; got != want {
		t.Errorf("ws.Signal() got %v, want %v", got, want)
	}
}
// Test that terminals work with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
	for name, conf := range configs(t, false /* noOverlay */) {
		t.Run(name, func(t *testing.T) {
			rootDir, cleanup, err := testutil.SetupRootDir()
			if err != nil {
				t.Fatalf("error creating root dir: %v", err)
			}
			defer cleanup()
			conf.RootDir = rootDir

			// Don't let bash execute from profile or rc files, otherwise our PID
			// counts get messed up.
			bash := []string{"/bin/bash", "--noprofile", "--norc"}
			testSpecs, ids := createSpecs(bash, bash)

			type termContainer struct {
				container *Container
				master    *os.File
			}
			var containers []termContainer
			for i, spec := range testSpecs {
				bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
				if err != nil {
					t.Fatalf("error setting up container: %v", err)
				}
				defer cleanup()

				spec.Process.Terminal = true
				sock, err := socketPath(bundleDir)
				if err != nil {
					t.Fatalf("error getting socket path: %v", err)
				}
				srv, cleanup := createConsoleSocket(t, sock)
				defer cleanup()

				// Create the container and pass the socket name.
				args := Args{
					ID:            ids[i],
					Spec:          spec,
					BundleDir:     bundleDir,
					ConsoleSocket: sock,
				}
				cont, err := New(conf, args)
				if err != nil {
					t.Fatalf("error creating container: %v", err)
				}
				defer cont.Destroy()

				if err := cont.Start(conf); err != nil {
					t.Fatalf("error starting container: %v", err)
				}

				// Make sure we get a console PTY.
				ptyMaster, err := receiveConsolePTY(srv)
				if err != nil {
					t.Fatalf("error receiving console FD: %v", err)
				}
				defer ptyMaster.Close()

				containers = append(containers, termContainer{
					container: cont,
					master:    ptyMaster,
				})
			}

			for _, tc := range containers {
				// Bash output as well as sandbox output will be written to the PTY
				// file. Writes after a certain point will block unless we drain the
				// PTY, so we must continually copy from it.
				//
				// We log the output to stderr for debuggability, and also to a buffer,
				// since we wait on particular output from bash below. We use a custom
				// blockingBuffer which is thread-safe and also blocks on Read calls,
				// which makes this a suitable Reader for WaitUntilRead.
				ptyBuf := newBlockingBuffer()
				tee := io.TeeReader(tc.master, ptyBuf)
				go func() {
					_, _ = io.Copy(os.Stderr, tee)
				}()

				// Wait for bash to start.
				expectedPL := []*control.Process{
					newProcessBuilder().Cmd("bash").Process(),
				}
				if err := waitForProcessList(tc.container, expectedPL); err != nil {
					t.Fatalf("error waiting for processes: %v", err)
				}

				// Execute an echo command and check that it ran correctly. Use a
				// variable so that we match against the command's output rather than
				// the echoed command line itself.
				if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
					t.Fatalf("master.Write(): %v", err)
				}
				if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
					t.Fatalf("echo didn't execute: %v", err)
				}
			}
		})
	}
}
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
	// A send to readCh indicates that a previously empty buffer now has
	// data for reading.
	readCh chan struct{}

	// mu protects buf.
	mu  sync.Mutex
	buf bytes.Buffer
}

func newBlockingBuffer() *blockingBuffer {
	return &blockingBuffer{
		readCh: make(chan struct{}, 1),
	}
}

// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
	bb.mu.Lock()
	defer bb.mu.Unlock()
	l := bb.buf.Len()
	n, err := bb.buf.Write(p)
	if l == 0 && n > 0 {
		// New data!
		bb.readCh <- struct{}{}
	}
	return n, err
}

// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
	for {
		bb.mu.Lock()
		n, err := bb.buf.Read(p)
		if n > 0 || err != io.EOF {
			if bb.buf.Len() == 0 {
				// Reset the readCh.
				select {
				case <-bb.readCh:
				default:
				}
			}
			bb.mu.Unlock()
			return n, err
		}
		bb.mu.Unlock()

		// Wait for new data.
		<-bb.readCh
	}
}

// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
	bb.mu.Lock()
	defer bb.mu.Unlock()
	bb.buf.Reset()
	// Reset the readCh.
	select {
	case <-bb.readCh:
	default:
	}
}
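
// blockingBufferDemo is a small illustrative sketch (not used by the tests
// above) of the read-blocks-until-write behavior that makes blockingBuffer a
// suitable Reader for testutil.WaitUntilRead: a Read issued before any data
// exists parks on readCh until a concurrent Write supplies bytes.
func blockingBufferDemo() string {
	bb := newBlockingBuffer()
	go func() {
		// Simulate delayed PTY output arriving from another goroutine.
		time.Sleep(10 * time.Millisecond)
		bb.Write([]byte("Terminated\n"))
	}()

	// Read blocks until the goroutine above writes, then returns the data.
	buf := make([]byte, 32)
	n, _ := bb.Read(buf)
	return string(buf[:n]) // "Terminated\n"
}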