gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/test/syscalls/linux/timers.cc

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <errno.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <syscall.h>
#include <time.h>
#include <unistd.h>

#include <atomic>

#include "gtest/gtest.h"
#include "absl/flags/flag.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "benchmark/benchmark.h"
#include "test/util/cleanup.h"
#include "test/util/logging.h"
#include "test/util/multiprocess_util.h"
#include "test/util/posix_error.h"
#include "test/util/signal_util.h"
#include "test/util/test_util.h"
#include "test/util/thread_util.h"
#include "test/util/timer_util.h"

ABSL_FLAG(bool, timers_test_sleep, false,
          "If true, sleep forever instead of running tests.");

using ::testing::_;
using ::testing::AnyOf;

namespace gvisor {
namespace testing {
namespace {

#ifndef CPUCLOCK_PROF
#define CPUCLOCK_PROF 0
#endif  // CPUCLOCK_PROF

PosixErrorOr<absl::Duration> ProcessCPUTime(pid_t pid) {
  // Use pid-specific CPUCLOCK_PROF, which is the clock used to enforce
  // RLIMIT_CPU.
  clockid_t clockid = (~static_cast<clockid_t>(pid) << 3) | CPUCLOCK_PROF;

  struct timespec ts;
  int ret = clock_gettime(clockid, &ts);
  if (ret < 0) {
    return PosixError(errno, "clock_gettime failed");
  }

  return absl::DurationFromTimespec(ts);
}
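
// Note on the clockid above (illustrative sketch, not used by the tests):
// Linux encodes dynamic per-process CPU clocks as described in
// include/linux/posix-timers.h. Bits 3 and up hold the bitwise-inverted PID,
// bit 2 is set for per-thread (rather than process-wide) clocks, and bits 0-1
// select the clock: 0 = CPUCLOCK_PROF (utime + stime, the clock RLIMIT_CPU is
// checked against), 1 = CPUCLOCK_VIRT (utime only), 2 = CPUCLOCK_SCHED
// (scheduler runtime). ProcessCPUTime() assembles the ID by hand rather than
// calling clock_getcpuclockid(3), which (at least in glibc) yields the
// CPUCLOCK_SCHED variant instead of CPUCLOCK_PROF. A general process-clock
// builder would look like:
[[maybe_unused]] clockid_t MakeProcessCPUClock(pid_t pid, clockid_t which) {
  // `which` is CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1), or CPUCLOCK_SCHED (2).
  return (~static_cast<clockid_t>(pid) << 3) | which;
}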

void NoopSignalHandler(int signo) {
  TEST_CHECK_MSG(SIGXCPU == signo,
                 "NoopSignalHandler did not receive expected signal");
}

void UninstallingSignalHandler(int signo) {
  TEST_CHECK_MSG(SIGXCPU == signo,
                 "UninstallingSignalHandler did not receive expected signal");
  struct sigaction rev_action;
  rev_action.sa_handler = SIG_DFL;
  rev_action.sa_flags = 0;
  sigemptyset(&rev_action.sa_mask);
  sigaction(SIGXCPU, &rev_action, nullptr);
}

TEST(TimerTest, ProcessKilledOnCPUSoftLimit) {
  constexpr absl::Duration kSoftLimit = absl::Seconds(1);
  constexpr absl::Duration kHardLimit = absl::Seconds(3);

  struct rlimit cpu_limits;
  cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
  cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);

  int pid = fork();
  MaybeSave();
  if (pid == 0) {
    TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
    MaybeSave();
    for (;;) {
      int x = 0;
      benchmark::DoNotOptimize(x);  // Don't optimize this loop away.
    }
  }
  ASSERT_THAT(pid, SyscallSucceeds());
  auto c = Cleanup([pid] {
    int status;
    EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
    EXPECT_TRUE(WIFSIGNALED(status));
    EXPECT_EQ(WTERMSIG(status), SIGXCPU);
  });

  // Wait for the child to exit, but do not reap it. This will allow us to
  // check its CPU usage while it is zombied.
  EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
              SyscallSucceeds());

  // Assert that the child spent at least kSoftLimit (1s) of CPU time before
  // getting killed.
  //
  // We must be careful to use CPUCLOCK_PROF, the same clock used for
  // RLIMIT_CPU enforcement, to get correct results. Note that this is slightly
  // different from rusage-reported CPU usage:
  //
  // RLIMIT_CPU, CPUCLOCK_PROF use kernel/sched/cputime.c:thread_group_cputime.
  // rusage uses kernel/sched/cputime.c:thread_group_cputime_adjusted.
  absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
  EXPECT_GE(cpu, kSoftLimit);

  // Child did not make it to the hard limit.
  //
  // Linux sends SIGXCPU synchronously with CPU tick updates. See
  // kernel/time/timer.c:update_process_times:
  //   => account_process_tick  // update task CPU usage.
  //   => run_posix_cpu_timers  // enforce RLIMIT_CPU, sending signal.
  //
  // Thus, the only chance for this to flake is if the system time required to
  // deliver the signal exceeds 2s.
  EXPECT_LT(cpu, kHardLimit);
}

TEST(TimerTest, ProcessPingedRepeatedlyAfterCPUSoftLimit) {
  struct sigaction new_action;
  new_action.sa_handler = UninstallingSignalHandler;
  new_action.sa_flags = 0;
  sigemptyset(&new_action.sa_mask);

  constexpr absl::Duration kSoftLimit = absl::Seconds(1);
  constexpr absl::Duration kHardLimit = absl::Seconds(10);

  struct rlimit cpu_limits;
  cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
  cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);

  int pid = fork();
  MaybeSave();
  if (pid == 0) {
    TEST_PCHECK(sigaction(SIGXCPU, &new_action, nullptr) == 0);
    MaybeSave();
    TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
    MaybeSave();
    for (;;) {
      int x = 0;
      benchmark::DoNotOptimize(x);  // Don't optimize this loop away.
    }
  }
  ASSERT_THAT(pid, SyscallSucceeds());
  auto c = Cleanup([pid] {
    int status;
    EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
    EXPECT_TRUE(WIFSIGNALED(status));
    EXPECT_EQ(WTERMSIG(status), SIGXCPU);
  });

  // Wait for the child to exit, but do not reap it. This will allow us to
  // check its CPU usage while it is zombied.
  EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
              SyscallSucceeds());

  absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
  // After the soft limit, another SIGXCPU is sent after each additional second
  // of CPU time; the second one (now with default action) kills the child.
  EXPECT_GE(cpu, kSoftLimit + absl::Seconds(1));

  // Child did not make it to the hard limit.
  //
  // As above, should not flake.
  EXPECT_LT(cpu, kHardLimit);
}

TEST(TimerTest, ProcessKilledOnCPUHardLimit) {
  struct sigaction new_action;
  new_action.sa_handler = NoopSignalHandler;
  new_action.sa_flags = 0;
  sigemptyset(&new_action.sa_mask);

  constexpr absl::Duration kSoftLimit = absl::Seconds(1);
  constexpr absl::Duration kHardLimit = absl::Seconds(3);

  struct rlimit cpu_limits;
  cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
  cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);

  int pid = fork();
  MaybeSave();
  if (pid == 0) {
    TEST_PCHECK(sigaction(SIGXCPU, &new_action, nullptr) == 0);
    MaybeSave();
    TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
    MaybeSave();
    for (;;) {
      int x = 0;
      benchmark::DoNotOptimize(x);  // Don't optimize this loop away.
    }
  }
  ASSERT_THAT(pid, SyscallSucceeds());
  auto c = Cleanup([pid] {
    int status;
    EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
    EXPECT_TRUE(WIFSIGNALED(status));
    EXPECT_EQ(WTERMSIG(status), SIGKILL);
  });

  // Wait for the child to exit, but do not reap it. This will allow us to
  // check its CPU usage while it is zombied.
  EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
              SyscallSucceeds());

  absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
  EXPECT_GE(cpu, kHardLimit);
}

// See timerfd.cc:TimerSlack() for rationale.
constexpr absl::Duration kTimerSlack = absl::Milliseconds(500);
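
// The IntervalTimerTest cases below drive POSIX interval timers through the
// TimerCreate()/Set()/Get()/Overruns() helpers from test/util/timer_util.h.
// As a reference for readers unfamiliar with those helpers, the sketch below
// shows a roughly equivalent sequence of raw libc calls (illustrative only;
// the helpers are not necessarily implemented this way).
[[maybe_unused]] void RawIntervalTimerSketch() {
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_NONE;  // No notification; observe via timer_gettime.
  timer_t timerid;
  TEST_PCHECK(timer_create(CLOCK_MONOTONIC, &sev, &timerid) == 0);

  struct itimerspec its = {};
  its.it_value.tv_sec = 1;     // First expiration one second from now.
  its.it_interval.tv_sec = 1;  // Then every second; zero makes it one-shot.
  TEST_PCHECK(timer_settime(timerid, 0, &its, nullptr) == 0);  // ~ timer.Set()

  TEST_PCHECK(timer_gettime(timerid, &its) == 0);  // ~ timer.Get()
  int overruns = timer_getoverrun(timerid);        // ~ timer.Overruns()
  TEST_PCHECK(overruns >= 0);

  TEST_PCHECK(timer_delete(timerid) == 0);  // ~ the helper's destructor
}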

TEST(IntervalTimerTest, IsInitiallyStopped) {
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_NONE;
  const auto timer =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
  const struct itimerspec its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
  EXPECT_EQ(0, its.it_value.tv_sec);
  EXPECT_EQ(0, its.it_value.tv_nsec);
}

// Kernel can create multiple timers without issue.
//
// Regression test for gvisor.dev/issue/1738.
TEST(IntervalTimerTest, MultipleTimers) {
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_NONE;
  const auto timer1 =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
  const auto timer2 =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
}

TEST(IntervalTimerTest, SingleShotSilent) {
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_NONE;
  const auto timer =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kDelay = absl::Seconds(1);
  struct itimerspec its = {};
  its.it_value = absl::ToTimespec(kDelay);
  ASSERT_NO_ERRNO(timer.Set(0, its));

  // The timer should count down to 0 and stop since the interval is zero. No
  // overruns should be counted.
  absl::SleepFor(kDelay + kTimerSlack);
  its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
  EXPECT_EQ(0, its.it_value.tv_sec);
  EXPECT_EQ(0, its.it_value.tv_nsec);
  EXPECT_THAT(timer.Overruns(), IsPosixErrorOkAndHolds(0));
}

TEST(IntervalTimerTest, PeriodicSilent) {
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_NONE;
  const auto timer =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));

  absl::SleepFor(kPeriod * 3 + kTimerSlack);

  // The timer should still be running.
  its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
  EXPECT_TRUE(its.it_value.tv_nsec != 0 || its.it_value.tv_sec != 0);

  // Timer expirations are not counted as overruns under SIGEV_NONE.
  EXPECT_THAT(timer.Overruns(), IsPosixErrorOkAndHolds(0));
}

std::atomic<int> counted_signals;

void IntervalTimerCountingSignalHandler(int sig, siginfo_t* info,
                                        void* ucontext) {
  counted_signals.fetch_add(1 + info->si_overrun);
}

TEST(IntervalTimerTest, PeriodicGroupDirectedSignal) {
  constexpr int kSigno = SIGUSR1;
  constexpr int kSigvalue = 42;

  // Install our signal handler.
  counted_signals.store(0);
  struct sigaction sa = {};
  sa.sa_sigaction = IntervalTimerCountingSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  const auto scoped_sigaction =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));

  // Ensure that kSigno is unblocked on at least one thread.
  const auto scoped_sigmask =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, kSigno));

  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo = kSigno;
  sev.sigev_value.sival_int = kSigvalue;
  auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  constexpr int kCycles = 3;
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));

  absl::SleepFor(kPeriod * kCycles + kTimerSlack);
  EXPECT_GE(counted_signals.load(), kCycles);
}
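
// Most of the remaining tests share a pattern: block the timer's signal so
// that expirations accumulate as overruns rather than invoking a handler,
// then dequeue the single pending signal with sigtimedwait() and a zero
// timeout so the wait never blocks (EAGAIN means nothing was pending). A
// minimal sketch of that dequeue step (illustrative only; the tests inline
// it):
[[maybe_unused]] int DequeueOnePendingSignal(const sigset_t& mask,
                                             siginfo_t* si) {
  struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
  // Returns the signal number, or -1 with errno == EAGAIN if none is pending.
  return sigtimedwait(&mask, si, &zero_ts);
}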

TEST(IntervalTimerTest, PeriodicThreadDirectedSignal) {
  constexpr int kSigno = SIGUSR1;
  constexpr int kSigvalue = 42;

  // Block kSigno so that we can accumulate overruns.
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, kSigno);
  const auto scoped_sigmask =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));

  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_THREAD_ID;
  sev.sigev_signo = kSigno;
  sev.sigev_value.sival_int = kSigvalue;
  sev.sigev_notify_thread_id = gettid();
  auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  constexpr int kCycles = 3;
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));
  absl::SleepFor(kPeriod * kCycles + kTimerSlack);

  // At least kCycles expirations should have occurred, resulting in kCycles-1
  // overruns (the first expiration sent the signal successfully).
  siginfo_t si;
  struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
  ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallSucceedsWithValue(kSigno));
  EXPECT_EQ(si.si_signo, kSigno);
  EXPECT_EQ(si.si_code, SI_TIMER);
  EXPECT_EQ(si.si_timerid, timer.get());
  EXPECT_GE(si.si_overrun, kCycles - 1);
  EXPECT_EQ(si.si_int, kSigvalue);

  // Kill the timer, then drain any additional signal it may have enqueued. We
  // can't do this before the preceding sigtimedwait because stopping or
  // deleting the timer resets si_overrun to 0.
  timer.reset();
  sigtimedwait(&mask, &si, &zero_ts);
}

TEST(IntervalTimerTest, OtherThreadGroup) {
  constexpr int kSigno = SIGUSR1;

  // Create a subprocess that does nothing until killed.
  pid_t child_pid;
  const auto sp = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
      "/proc/self/exe", ExecveArray({"timers", "--timers_test_sleep"}),
      ExecveArray(), &child_pid, nullptr));

  // Verify that we can't create a timer that would send signals to it.
  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_THREAD_ID;
  sev.sigev_signo = kSigno;
  sev.sigev_notify_thread_id = child_pid;
  EXPECT_THAT(TimerCreate(CLOCK_MONOTONIC, sev), PosixErrorIs(EINVAL, _));
}

TEST(IntervalTimerTest, RealTimeSignalsAreNotDuplicated) {
  const int kSigno = SIGRTMIN;
  constexpr int kSigvalue = 42;

  // Block kSigno so that we can accumulate overruns.
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, kSigno);
  const auto scoped_sigmask =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));

  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_THREAD_ID;
  sev.sigev_signo = kSigno;
  sev.sigev_value.sival_int = kSigvalue;
  sev.sigev_notify_thread_id = gettid();
  const auto timer =
      ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  constexpr int kCycles = 3;
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));
  absl::SleepFor(kPeriod * kCycles + kTimerSlack);

  // Stop the timer so that no further signals are enqueued after sigtimedwait.
  struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
  its.it_value = its.it_interval = zero_ts;
  ASSERT_NO_ERRNO(timer.Set(0, its));

  // The timer should have sent only a single signal, even though the kernel
  // supports enqueueing of multiple RT signals.
  siginfo_t si;
  ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallSucceedsWithValue(kSigno));
  EXPECT_EQ(si.si_signo, kSigno);
  EXPECT_EQ(si.si_code, SI_TIMER);
  EXPECT_EQ(si.si_timerid, timer.get());
  // si_overrun was reset by timer_settime.
  EXPECT_EQ(si.si_overrun, 0);
  EXPECT_EQ(si.si_int, kSigvalue);
  EXPECT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallFailsWithErrno(EAGAIN));
}
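
// Background for the tests above and below (facts from signal(7) and
// timer_create(2), not exercised directly here): standard signals such as
// SIGUSR1 are not queued; if an instance is already pending, further
// instances are discarded. Real-time signals (SIGRTMIN and up) normally do
// queue multiple instances, e.g. with sigqueue(3):
//
//   union sigval v;
//   v.sival_int = 1;
//   sigqueue(getpid(), SIGRTMIN, v);
//   sigqueue(getpid(), SIGRTMIN, v);  // a second, distinct pending signal
//
// POSIX interval timers are the exception: at most one signal per timer is
// pending at any time, even for real-time signals; additional expirations are
// folded into the overrun count (si_overrun, or timer_getoverrun(2)) instead.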

TEST(IntervalTimerTest, AlreadyPendingSignal) {
  constexpr int kSigno = SIGUSR1;
  constexpr int kSigvalue = 42;

  // Block kSigno so that we can accumulate overruns.
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, kSigno);
  const auto scoped_sigmask =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));

  // Send ourselves a signal, preventing the timer from enqueuing.
  ASSERT_THAT(tgkill(getpid(), gettid(), kSigno), SyscallSucceeds());

  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_THREAD_ID;
  sev.sigev_signo = kSigno;
  sev.sigev_value.sival_int = kSigvalue;
  sev.sigev_notify_thread_id = gettid();
  auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  constexpr int kCycles = 3;
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));

  // End the sleep one cycle short; we will sleep for one more cycle below.
  absl::SleepFor(kPeriod * (kCycles - 1));

  // Dequeue the first signal, which we sent to ourselves with tgkill.
  siginfo_t si;
  struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
  ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallSucceedsWithValue(kSigno));
  EXPECT_EQ(si.si_signo, kSigno);
  // glibc sigtimedwait silently replaces SI_TKILL with SI_USER:
  // sysdeps/unix/sysv/linux/sigtimedwait.c:__sigtimedwait(). This isn't
  // documented, so we don't depend on it.
  EXPECT_THAT(si.si_code, AnyOf(SI_USER, SI_TKILL));

  // Sleep for 1 more cycle to give the timer time to send a signal.
  absl::SleepFor(kPeriod + kTimerSlack);

  // At least kCycles expirations should have occurred, resulting in kCycles-1
  // overruns (the last expiration sent the signal successfully).
  ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallSucceedsWithValue(kSigno));
  EXPECT_EQ(si.si_signo, kSigno);
  EXPECT_EQ(si.si_code, SI_TIMER);
  EXPECT_EQ(si.si_timerid, timer.get());
  EXPECT_GE(si.si_overrun, kCycles - 1);
  EXPECT_EQ(si.si_int, kSigvalue);

  // Kill the timer, then drain any additional signal it may have enqueued. We
  // can't do this before the preceding sigtimedwait because stopping or
  // deleting the timer resets si_overrun to 0.
  timer.reset();
  sigtimedwait(&mask, &si, &zero_ts);
}
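
// A note on the scoped_sigmask.Release()() idiom in the next test: Release()
// hands back the stored cleanup callable (so it no longer runs at scope
// exit), and the trailing () invokes it immediately, restoring the previous
// signal mask so that a different mask can be installed partway through the
// test. See test/util/cleanup.h.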

TEST(IntervalTimerTest, IgnoredSignalCountsAsOverrun) {
  constexpr int kSigno = SIGUSR1;
  constexpr int kSigvalue = 42;

  // Ignore kSigno.
  struct sigaction sa = {};
  sa.sa_handler = SIG_IGN;
  const auto scoped_sigaction =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));

  // Unblock kSigno so that ignored signals will be discarded.
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, kSigno);
  auto scoped_sigmask =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, mask));

  struct sigevent sev = {};
  sev.sigev_notify = SIGEV_THREAD_ID;
  sev.sigev_signo = kSigno;
  sev.sigev_value.sival_int = kSigvalue;
  sev.sigev_notify_thread_id = gettid();
  auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));

  constexpr absl::Duration kPeriod = absl::Seconds(1);
  constexpr int kCycles = 3;
  struct itimerspec its = {};
  its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
  ASSERT_NO_ERRNO(timer.Set(0, its));

  // End the sleep one cycle short; we will sleep for one more cycle below.
  absl::SleepFor(kPeriod * (kCycles - 1));

  // Block kSigno so that ignored signals will be enqueued.
  scoped_sigmask.Release()();
  scoped_sigmask = ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));

  // Sleep for 1 more cycle to give the timer time to send a signal.
  absl::SleepFor(kPeriod + kTimerSlack);

  // At least kCycles expirations should have occurred, resulting in kCycles-1
  // overruns (the last expiration sent the signal successfully).
  siginfo_t si;
  struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
  ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
              SyscallSucceedsWithValue(kSigno));
  EXPECT_EQ(si.si_signo, kSigno);
  EXPECT_EQ(si.si_code, SI_TIMER);
  EXPECT_EQ(si.si_timerid, timer.get());
  EXPECT_GE(si.si_overrun, kCycles - 1);
  EXPECT_EQ(si.si_int, kSigvalue);

  // Kill the timer, then drain any additional signal it may have enqueued. We
  // can't do this before the preceding sigtimedwait because stopping or
  // deleting the timer resets si_overrun to 0.
  timer.reset();
  sigtimedwait(&mask, &si, &zero_ts);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor

int main(int argc, char** argv) {
  gvisor::testing::TestInit(&argc, &argv);

  if (absl::GetFlag(FLAGS_timers_test_sleep)) {
    while (true) {
      absl::SleepFor(absl::Seconds(10));
    }
  }

  return gvisor::testing::RunAllTests();
}