github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/runsc/container/console_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package container

import (
	"bytes"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/kr/pty"
	"golang.org/x/sys/unix"
	"github.com/SagerNet/gvisor/pkg/sentry/control"
	"github.com/SagerNet/gvisor/pkg/sync"
	"github.com/SagerNet/gvisor/pkg/test/testutil"
	"github.com/SagerNet/gvisor/pkg/unet"
	"github.com/SagerNet/gvisor/pkg/urpc"
)

// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
	num := rand.Intn(10000)
	path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
	const maxPathLen = 108
	if len(path) <= maxPathLen {
		return path, nil
	}

	// Path is too large, try to make it smaller.
	cwd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("error getting cwd: %v", err)
	}
	path, err = filepath.Rel(cwd, path)
	if err != nil {
		return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
	}
	if len(path) > maxPathLen {
		return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
	}
	return path, nil
}

// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf is called. The
// returned cleanup function should be deferred.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
	t.Helper()
	srv, err := unet.BindAndListen(path, false)
	if err != nil {
		t.Fatalf("error binding and listening to socket %q: %v", path, err)
	}

	cleanup := func() {
		// Log errors; nothing can be done.
		if err := srv.Close(); err != nil {
			t.Logf("error closing socket %q: %v", path, err)
		}
		if err := os.Remove(path); err != nil {
			t.Logf("error removing socket %q: %v", path, err)
		}
	}

	return srv, cleanup
}

// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails if more than one FD is received, or if the FD is not a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
	sock, err := srv.Accept()
	if err != nil {
		return nil, fmt.Errorf("error accepting socket connection: %v", err)
	}

	// Allow one FD to be received; we only expect one.
	r := sock.Reader(true /* blocking */)
	r.EnableFDs(1)

	// The socket is closed right after sending the FD, so EOF is
	// an allowed error.
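	// The read vector is a single empty slice: the test does not care about
	// any payload bytes, only about the control message carrying the console
	// FD, which is pulled out below with ExtractFDs.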
	b := [][]byte{{}}
	if _, err := r.ReadVec(b); err != nil && err != io.EOF {
		return nil, fmt.Errorf("error reading from socket connection: %v", err)
	}

	// We should have gotten a control message.
	fds, err := r.ExtractFDs()
	if err != nil {
		return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
	}
	if len(fds) != 1 {
		return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
	}

	// Verify that the fd is a terminal.
	if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
		return nil, fmt.Errorf("fd is not a terminal (ioctl TCGETS got %v)", err)
	}

	return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}

// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
	for name, conf := range configs(t, all...) {
		t.Run(name, func(t *testing.T) {
			spec := testutil.NewSpecWithArgs("true")
			spec.Process.Terminal = true
			_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the container and pass the socket name.
			args := Args{
				ID:            testutil.RandomContainerID(),
				Spec:          spec,
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			c, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer c.Destroy()

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}

// Test that a pty FD is sent over the console socket if one is provided to a
// sub-container.
func TestMultiContainerConsoleSocket(t *testing.T) {
	for name, conf := range configs(t, all...) {
		t.Run(name, func(t *testing.T) {
			rootDir, cleanup, err := testutil.SetupRootDir()
			if err != nil {
				t.Fatalf("error creating root dir: %v", err)
			}
			defer cleanup()
			conf.RootDir = rootDir

			// Set up the containers.
			sleep := []string{"sleep", "100"}
			tru := []string{"true"}
			testSpecs, ids := createSpecs(sleep, tru)
			testSpecs[1].Process.Terminal = true

			bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			args := Args{
				ID:        ids[0],
				Spec:      testSpecs[0],
				BundleDir: bundleDir,
			}
			rootCont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer rootCont.Destroy()
			if err := rootCont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			// Set up the bundle for the sub-container, which is the one that
			// gets the terminal.
			bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[1])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the container and pass the socket name.
			args = Args{
				ID:            ids[1],
				Spec:          testSpecs[1],
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			cont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer cont.Destroy()
			if err := cont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}

// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
	conf := testutil.TestConfig(t)

	_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer cleanup()

	// Create and start the container.
	args := Args{
		ID:        testutil.RandomContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Create a pty master/replica. The replica will be passed to the exec
	// process.
	ptyMaster, ptyReplica, err := pty.Open()
	if err != nil {
		t.Fatalf("error opening pty: %v", err)
	}
	defer ptyMaster.Close()
	defer ptyReplica.Close()

	// Exec bash and attach a terminal. Note that occasionally /bin/sh
	// may be a different shell or have a different configuration (such
	// as disabling interactive mode and job control). Since we want to
	// explicitly test interactive mode, use /bin/bash. See b/116981926.
	execArgs := &control.ExecArgs{
		Filename: "/bin/bash",
		// Don't let bash execute from profile or rc files, otherwise
		// our PID counts get messed up.
		Argv: []string{"/bin/bash", "--noprofile", "--norc"},
		// Pass the pty replica as FD 0, 1, and 2.
		FilePayload: urpc.FilePayload{
			Files: []*os.File{ptyReplica, ptyReplica, ptyReplica},
		},
		StdioIsPty: true,
	}

	pid, err := c.Execute(execArgs)
	if err != nil {
		t.Fatalf("error executing: %v", err)
	}
	if pid != 2 {
		t.Fatalf("exec got pid %d, wanted %d", pid, 2)
	}

	// Make sure all the processes are running.
	expectedPL := []*control.Process{
		// Root container process.
		newProcessBuilder().Cmd("sleep").Process(),
		// Bash from exec process.
		newProcessBuilder().PID(2).Cmd("bash").Process(),
	}
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Execute sleep.
	ptyMaster.Write([]byte("sleep 100\n"))

	// Wait for it to start. Sleep's PPID is bash's PID.
	expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Send a SIGTERM to the foreground process for the exec PID. Note that
	// although we pass in the PID of "bash", it should actually terminate
	// "sleep", since that is the foreground process.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Sleep process should be gone.
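	// Drop the sleep entry from the expected process list; only the root
	// sleep and the exec'd bash should remain.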
	expectedPL = expectedPL[:len(expectedPL)-1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Sleep is dead, but it may take more time for bash to notice and
	// change the foreground process back to itself. We know it is done
	// when bash writes "Terminated" to the pty.
	if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
		t.Fatalf("bash did not take over pty: %v", err)
	}

	// Send a SIGKILL to the foreground process again. This time "bash"
	// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
	// because bash ignores those.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}
	expectedPL = expectedPL[:1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Make sure the process indicates it was killed by a SIGKILL.
	ws, err := c.WaitPID(pid)
	if err != nil {
		t.Errorf("waiting on container failed: %v", err)
	}
	if !ws.Signaled() {
		t.Error("ws.Signaled() got false, want true")
	}
	if got, want := ws.Signal(), unix.SIGKILL; got != want {
		t.Errorf("ws.Signal() got %v, want %v", got, want)
	}
}

// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
	conf := testutil.TestConfig(t)
	// Don't let bash execute from profile or rc files, otherwise our PID
	// counts get messed up.
	spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
	spec.Process.Terminal = true

	_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer cleanup()

	sock, err := socketPath(bundleDir)
	if err != nil {
		t.Fatalf("error getting socket path: %v", err)
	}
	srv, cleanup := createConsoleSocket(t, sock)
	defer cleanup()

	// Create the container and pass the socket name.
	args := Args{
		ID:            testutil.RandomContainerID(),
		Spec:          spec,
		BundleDir:     bundleDir,
		ConsoleSocket: sock,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()

	// Get the PTY master.
	ptyMaster, err := receiveConsolePTY(srv)
	if err != nil {
		t.Fatalf("error receiving console FD: %v", err)
	}
	defer ptyMaster.Close()

	// Bash output as well as sandbox output will be written to the PTY
	// file. Writes after a certain point will block unless we drain the
	// PTY, so we must continually copy from it.
	//
	// We log the output to stderr for debuggability, and also to a buffer,
	// since we wait on particular output from bash below. We use a custom
	// blockingBuffer which is thread-safe and also blocks on Read calls,
	// which makes it a suitable Reader for WaitUntilRead.
	ptyBuf := newBlockingBuffer()
	tee := io.TeeReader(ptyMaster, ptyBuf)
	go io.Copy(os.Stderr, tee)

	// Start the container.
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Start waiting for the container to exit in a goroutine. We do this
	// very early, otherwise it might exit before we have a chance to call
	// Wait.
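	// ws receives the container's final wait status; wg lets us join the
	// waiter goroutine before inspecting it at the end of the test.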
	var (
		ws unix.WaitStatus
		wg sync.WaitGroup
	)
	wg.Add(1)
	go func() {
		var err error
		ws, err = c.Wait()
		if err != nil {
			t.Errorf("error waiting on container: %v", err)
		}
		wg.Done()
	}()

	// Wait for bash to start.
	expectedPL := []*control.Process{
		newProcessBuilder().PID(1).Cmd("bash").Process(),
	}
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Fatalf("error waiting for processes: %v", err)
	}

	// Execute sleep via the terminal.
	ptyMaster.Write([]byte("sleep 100\n"))

	// Wait for sleep to start.
	expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Fatalf("error waiting for processes: %v", err)
	}

	// Reset the pty buffer, so there is less output for us to scan later.
	ptyBuf.Reset()

	// Send a SIGTERM to the foreground process. We pass PID=0, indicating
	// that the root process should be killed. However, by setting
	// fgProcess=true, the signal should actually be sent to sleep.
	if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Sleep process should be gone.
	expectedPL = expectedPL[:len(expectedPL)-1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Sleep is dead, but it may take more time for bash to notice and
	// change the foreground process back to itself. We know it is done
	// when bash writes "Terminated" to the pty.
	if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
		t.Fatalf("bash did not take over pty: %v", err)
	}

	// Send a SIGKILL to the foreground process again. This time "bash"
	// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
	// because bash ignores those.
	if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Wait for the sandbox to exit. It should exit with a SIGKILL status.
	wg.Wait()
	if !ws.Signaled() {
		t.Error("ws.Signaled() got false, want true")
	}
	if got, want := ws.Signal(), unix.SIGKILL; got != want {
		t.Errorf("ws.Signal() got %v, want %v", got, want)
	}
}

// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
	for name, conf := range configs(t, all...) {
		t.Run(name, func(t *testing.T) {
			rootDir, cleanup, err := testutil.SetupRootDir()
			if err != nil {
				t.Fatalf("error creating root dir: %v", err)
			}
			defer cleanup()
			conf.RootDir = rootDir

			// Don't let bash execute from profile or rc files, otherwise our PID
			// counts get messed up.
			bash := []string{"/bin/bash", "--noprofile", "--norc"}
			testSpecs, ids := createSpecs(bash, bash)

			type termContainer struct {
				container *Container
				master    *os.File
			}
			var containers []termContainer
			for i, spec := range testSpecs {
				bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
				if err != nil {
					t.Fatalf("error setting up container: %v", err)
				}
				defer cleanup()

				spec.Process.Terminal = true
				sock, err := socketPath(bundleDir)
				if err != nil {
					t.Fatalf("error getting socket path: %v", err)
				}
				srv, cleanup := createConsoleSocket(t, sock)
				defer cleanup()

				// Create the container and pass the socket name.
				args := Args{
					ID:            ids[i],
					Spec:          spec,
					BundleDir:     bundleDir,
					ConsoleSocket: sock,
				}
				cont, err := New(conf, args)
				if err != nil {
					t.Fatalf("error creating container: %v", err)
				}
				defer cont.Destroy()

				if err := cont.Start(conf); err != nil {
					t.Fatalf("error starting container: %v", err)
				}

				// Make sure we get a console PTY.
				ptyMaster, err := receiveConsolePTY(srv)
				if err != nil {
					t.Fatalf("error receiving console FD: %v", err)
				}
				defer ptyMaster.Close()

				containers = append(containers, termContainer{
					container: cont,
					master:    ptyMaster,
				})
			}

			for _, tc := range containers {
				// Bash output as well as sandbox output will be written to the PTY
				// file. Writes after a certain point will block unless we drain the
				// PTY, so we must continually copy from it.
				//
				// We log the output to stderr for debuggability, and also to a buffer,
				// since we wait on particular output from bash below. We use a custom
				// blockingBuffer which is thread-safe and also blocks on Read calls,
				// which makes it a suitable Reader for WaitUntilRead.
				ptyBuf := newBlockingBuffer()
				tee := io.TeeReader(tc.master, ptyBuf)
				go io.Copy(os.Stderr, tee)

				// Wait for bash to start.
				expectedPL := []*control.Process{
					newProcessBuilder().Cmd("bash").Process(),
				}
				if err := waitForProcessList(tc.container, expectedPL); err != nil {
					t.Fatalf("error waiting for processes: %v", err)
				}

				// Execute an echo command and check that it ran correctly. Embed a
				// variable in the string so the match is not satisfied by the echoed
				// command line itself.
				tc.master.Write([]byte("echo foo-${PWD}-123\n"))
				if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
					t.Fatalf("echo didn't execute: %v", err)
				}
			}
		})
	}
}

// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
	// A send to readCh indicates that a previously empty buffer now has
	// data for reading.
	readCh chan struct{}

	// mu protects buf.
	mu  sync.Mutex
	buf bytes.Buffer
}

func newBlockingBuffer() *blockingBuffer {
	return &blockingBuffer{
		readCh: make(chan struct{}, 1),
	}
}

// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
	bb.mu.Lock()
	defer bb.mu.Unlock()
	l := bb.buf.Len()
	n, err := bb.buf.Write(p)
	if l == 0 && n > 0 {
		// New data!
		bb.readCh <- struct{}{}
	}
	return n, err
}

// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
	for {
		bb.mu.Lock()
		n, err := bb.buf.Read(p)
		if n > 0 || err != io.EOF {
			if bb.buf.Len() == 0 {
				// Reset the readCh.
				select {
				case <-bb.readCh:
				default:
				}
			}
			bb.mu.Unlock()
			return n, err
		}
		bb.mu.Unlock()

		// Wait for new data.
		<-bb.readCh
	}
}

// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
	bb.mu.Lock()
	defer bb.mu.Unlock()
	bb.buf.Reset()
	// Reset the readCh.
	select {
	case <-bb.readCh:
	default:
	}
}