github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/test/syscalls/linux/itimer.cc

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  #include <signal.h>
    16  #include <sys/socket.h>
    17  #include <sys/time.h>
    18  #include <sys/types.h>
    19  #include <time.h>
    20  
    21  #include <atomic>
    22  #include <functional>
    23  #include <iostream>
    24  #include <vector>
    25  
    26  #include "gmock/gmock.h"
    27  #include "gtest/gtest.h"
    28  #include "absl/strings/string_view.h"
    29  #include "absl/time/clock.h"
    30  #include "absl/time/time.h"
    31  #include "test/util/file_descriptor.h"
    32  #include "test/util/logging.h"
    33  #include "test/util/multiprocess_util.h"
    34  #include "test/util/posix_error.h"
    35  #include "test/util/signal_util.h"
    36  #include "test/util/test_util.h"
    37  #include "test/util/thread_util.h"
    38  #include "test/util/timer_util.h"
    39  
    40  namespace gvisor {
    41  namespace testing {
    42  namespace {
    43  
    44  constexpr char kSIGALRMToMainThread[] = "--itimer_sigalrm_to_main_thread";
    45  constexpr char kSIGPROFFairnessActive[] = "--itimer_sigprof_fairness_active";
    46  constexpr char kSIGPROFFairnessIdle[] = "--itimer_sigprof_fairness_idle";
    47  
    48  // Time period to be set for the itimers.
    49  constexpr absl::Duration kPeriod = absl::Milliseconds(25);
    50  // Total amount of time to spend per thread.
    51  constexpr absl::Duration kTestDuration = absl::Seconds(20);
    52  // Number of spin iterations to perform as the minimum work item per thread.
    53  // Chosen to be in the sub-millisecond range.
    54  constexpr int kIterations = 10000000;
    55  // Allow deviation in the number of samples.
    56  constexpr double kNumSamplesDeviationRatio = 0.2;
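        // (kNumSamplesDeviationRatio is applied to the difference between the two
        // workers' sample counts, measured as a fraction of their combined total; see
        // TestSIGPROFFairness() below.)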
    57  
    58  TEST(ItimerTest, ItimervalUpdatedBeforeExpiration) {
    59    constexpr int kSleepSecs = 10;
    60    constexpr int kAlarmSecs = 15;
    61    static_assert(
    62        kSleepSecs < kAlarmSecs,
    63        "kSleepSecs must be less than kAlarmSecs for the test to be meaningful");
    64    constexpr int kMaxRemainingSecs = kAlarmSecs - kSleepSecs;
    65  
    66    // Install a no-op handler for SIGALRM.
    67    struct sigaction sa = {};
    68    sigfillset(&sa.sa_mask);
    69    sa.sa_handler = +[](int signo) {};
    70    auto const cleanup_sa =
    71        ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
    72  
    73    // Set an itimer-based alarm for kAlarmSecs from now.
    74    struct itimerval itv = {};
    75    itv.it_value.tv_sec = kAlarmSecs;
    76    auto const cleanup_itimer =
    77        ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_REAL, itv));
    78  
    79    // After sleeping for kSleepSecs, the itimer value should reflect the elapsed
    80    // time even if it hasn't expired.
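          // With the constants above, getitimer() should report at most
          // kMaxRemainingSecs = kAlarmSecs - kSleepSecs = 5 seconds remaining.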
    81    absl::SleepFor(absl::Seconds(kSleepSecs));
    82    ASSERT_THAT(getitimer(ITIMER_REAL, &itv), SyscallSucceeds());
    83    EXPECT_TRUE(
    84        itv.it_value.tv_sec < kMaxRemainingSecs ||
    85        (itv.it_value.tv_sec == kMaxRemainingSecs && itv.it_value.tv_usec == 0))
    86        << "Remaining time: " << itv.it_value.tv_sec << " seconds + "
    87        << itv.it_value.tv_usec << " microseconds";
    88  }
    89  
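        // Per-thread count of signals received by SignalTestSignalHandler(); since it
        // is thread_local, each thread observes only the signals delivered to it.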
    90  ABSL_CONST_INIT static thread_local std::atomic_int signal_test_num_samples =
    91      ATOMIC_VAR_INIT(0);
    92  
    93  void SignalTestSignalHandler(int /*signum*/) { signal_test_num_samples++; }
    94  
    95  struct SignalTestResult {
    96    int expected_total;
    97    int main_thread_samples;
    98    std::vector<int> worker_samples;
    99  };
   100  
   101  std::ostream& operator<<(std::ostream& os, const SignalTestResult& r) {
   102    os << "{expected_total: " << r.expected_total
   103       << ", main_thread_samples: " << r.main_thread_samples
   104       << ", worker_samples: [";
   105    bool first = true;
   106    for (int sample : r.worker_samples) {
   107      if (!first) {
   108        os << ", ";
   109      }
   110      os << sample;
   111      first = false;
   112    }
   113    os << "]}";
   114    return os;
   115  }
   116  
   117  // Starts two worker threads and an itimer with the given id, and measures the
   118  // number of signals delivered to each thread.
   119  SignalTestResult ItimerSignalTest(int id, clock_t main_clock,
   120                                    clock_t worker_clock, int signal,
   121                                    absl::Duration sleep) {
   122    signal_test_num_samples = 0;
   123  
   124    struct sigaction sa = {};
   125    sa.sa_handler = &SignalTestSignalHandler;
   126    sa.sa_flags = SA_RESTART;
   127    sigemptyset(&sa.sa_mask);
   128    auto sigaction_cleanup = ScopedSigaction(signal, sa).ValueOrDie();
   129  
   130    int socketfds[2];
   131    TEST_PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, socketfds) == 0);
   132  
   133    // Do the spinning in the workers.
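          // The socket pair acts as a ping-pong token: each worker blocks in read(),
          // spins for kIterations, optionally sleeps, then writes a byte back to
          // unblock its peer, so the two workers alternate doing work.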
   134    std::function<void*(int)> work = [&](int socket_fd) {
   135      FileDescriptor fd(socket_fd);
   136  
   137      absl::Time finish = Now(worker_clock) + kTestDuration;
   138      while (Now(worker_clock) < finish) {
   139        // Blocked on read.
   140        char c;
   141        RetryEINTR(read)(fd.get(), &c, 1);
   142        for (int i = 0; i < kIterations; i++) {
   143          // Ensure compiler won't optimize this loop away.
   144          asm("");
   145        }
   146  
   147        if (sleep != absl::ZeroDuration()) {
   148          // Sleep so that the entire process is idle for a while.
   149          absl::SleepFor(sleep);
   150        }
   151  
   152        // Unblock the other thread.
   153        RetryEINTR(write)(fd.get(), &c, 1);
   154      }
   155  
   156      return reinterpret_cast<void*>(signal_test_num_samples.load());
   157    };
   158  
   159    ScopedThread th1(
   160        static_cast<std::function<void*()>>(std::bind(work, socketfds[0])));
   161    ScopedThread th2(
   162        static_cast<std::function<void*()>>(std::bind(work, socketfds[1])));
   163  
   164    absl::Time start = Now(main_clock);
   165    // Start the timer.
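          // Both the initial expiration (it_value) and the period between subsequent
          // expirations (it_interval) are set to kPeriod.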
   166    struct itimerval timer = {};
   167    timer.it_value = absl::ToTimeval(kPeriod);
   168    timer.it_interval = absl::ToTimeval(kPeriod);
   169    auto cleanup_itimer = ScopedItimer(id, timer).ValueOrDie();
   170  
   171    // Unblock th1.
   172    //
   173    // N.B. th2 owns socketfds[1] but can't close it until it unblocks.
   174    char c = 0;
   175    TEST_CHECK(write(socketfds[1], &c, 1) == 1);
   176  
   177    SignalTestResult result;
   178  
   179    // Wait for the workers to be done and collect their sample counts.
   180    result.worker_samples.push_back(reinterpret_cast<int64_t>(th1.Join()));
   181    result.worker_samples.push_back(reinterpret_cast<int64_t>(th2.Join()));
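          // Stop the timer (Release() hands back the cleanup function, which is then
          // invoked immediately) before computing expected_total and reading the main
          // thread's sample count.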
   182    cleanup_itimer.Release()();
   183    result.expected_total = (Now(main_clock) - start) / kPeriod;
   184    result.main_thread_samples = signal_test_num_samples.load();
   185  
   186    return result;
   187  }
   188  
   189  int TestSIGALRMToMainThread() {
   190    SignalTestResult result =
   191        ItimerSignalTest(ITIMER_REAL, CLOCK_REALTIME, CLOCK_REALTIME, SIGALRM,
   192                         absl::ZeroDuration());
   193  
   194    std::cerr << "result: " << result << std::endl;
   195  
   196    // ITIMER_REAL-generated SIGALRMs prefer to deliver to the thread group leader
   197    // (but don't guarantee it), so we expect to see most samples on the main
   198    // thread.
   199    //
   200    // The number of SIGALRMs delivered to a worker should not exceed 20%
   201    // of the number of total signals expected (this is somewhat arbitrary).
   202    const int worker_threshold = result.expected_total / 5;
   203  
   204    // Linux only guarantees timers will never expire before the requested time.
   205    // Thus, we only check the upper bound and also that the main thread received
   206    // at least one sample.
   207    TEST_CHECK(result.main_thread_samples <= result.expected_total);
   208    TEST_CHECK(result.main_thread_samples > 0);
   209    for (int num : result.worker_samples) {
   210      TEST_CHECK_MSG(num <= worker_threshold, "worker received too many samples");
   211    }
   212  
   213    return 0;
   214  }
   215  
   216  // Random save/restore is disabled as it introduces additional latency and
   217  // unpredictable distribution patterns.
   218  TEST(ItimerTest, DeliversSIGALRMToMainThread) {
   219    pid_t child;
   220    int execve_errno;
   221    auto kill = ASSERT_NO_ERRNO_AND_VALUE(
   222        ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGALRMToMainThread},
   223                    {}, &child, &execve_errno));
   224    EXPECT_EQ(0, execve_errno);
   225  
   226    int status;
   227    EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
   228                SyscallSucceedsWithValue(child));
   229  
   230    // Not required anymore.
   231    kill.Release();
   232  
   233    EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
   234  }
   235  
   236  // Checks that itimer signals are delivered to the worker threads fairly.
   237  //
   238  // sleep indicates how long each worker thread sleeps per iteration in order to
   239  // make the entire process idle for a while.
   240  int TestSIGPROFFairness(absl::Duration sleep) {
   241    SignalTestResult result =
   242        ItimerSignalTest(ITIMER_PROF, CLOCK_PROCESS_CPUTIME_ID,
   243                         CLOCK_THREAD_CPUTIME_ID, SIGPROF, sleep);
   244  
   245    std::cerr << "result: " << result << std::endl;
   246  
   247    // The number of samples on the main thread should be very low as it did
   248    // nothing.
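          // (During this window the main thread is mostly blocked in Join() and
          // accumulates almost no CPU time, which is why "very low" is expected.)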
   249    TEST_CHECK(result.main_thread_samples < 80);
   250  
   251    // Both workers should get roughly equal number of samples.
   252    TEST_CHECK(result.worker_samples.size() == 2);
   253  
   254    TEST_CHECK(result.expected_total > 0);
   255  
   256    // In an ideal world each thread would get exactly 50% of the signals, but
   257    // since that's unlikely to happen we only require that the two workers'
   258    // counts differ by less than kNumSamplesDeviationRatio of the combined total.
   259    TEST_CHECK_MSG(std::abs(result.worker_samples[0] - result.worker_samples[1]) <
   260                       ((result.worker_samples[0] + result.worker_samples[1]) *
   261                        kNumSamplesDeviationRatio),
   262                   "one worker received disproportionate share of samples");
   263  
   264    return 0;
   265  }
   266  
   267  // Random save/restore is disabled as it introduces additional latency and
   268  // unpredictable distribution patterns.
   269  TEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyActive) {
   270    // On the KVM and ptrace platforms, switches between sentry and application
   271    // context are sometimes extremely slow, causing the itimer to send SIGPROF to
   272    // a thread that either already has one pending or has had SIGPROF delivered,
   273    // but hasn't handled it yet (and thus still has SIGPROF masked). In
   274    // either case, since itimer signals are group-directed, signal sending falls
   275    // back to notifying the thread group leader. ItimerSignalTest() fails if "too
   276    // many" signals are delivered to the thread group leader, so these tests are
   277    // flaky on these platforms.
   278    //
   279    // TODO(b/143247272): Clarify why context switches are so slow on KVM.
   280    const auto gvisor_platform = GvisorPlatform();
   281    SKIP_IF(gvisor_platform == Platform::kKVM ||
   282            gvisor_platform == Platform::kPtrace);
   283  
   284    pid_t child;
   285    int execve_errno;
   286    auto kill = ASSERT_NO_ERRNO_AND_VALUE(
   287        ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGPROFFairnessActive},
   288                    {}, &child, &execve_errno));
   289    EXPECT_EQ(0, execve_errno);
   290  
   291    int status;
   292    EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
   293                SyscallSucceedsWithValue(child));
   294  
   295    // Not required anymore.
   296    kill.Release();
   297  
   298    EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
   299        << "Wait status: " << status;
   300  }
   301  
   302  // Random save/restore is disabled as it introduces additional latency and
   303  // unpredictable distribution patterns.
   304  TEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyIdle) {
   305    // See comment in DeliversSIGPROFToThreadsRoughlyFairlyActive.
   306    const auto gvisor_platform = GvisorPlatform();
   307    SKIP_IF(gvisor_platform == Platform::kKVM ||
   308            gvisor_platform == Platform::kPtrace);
   309  
   310    pid_t child;
   311    int execve_errno;
   312    auto kill = ASSERT_NO_ERRNO_AND_VALUE(
   313        ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGPROFFairnessIdle},
   314                    {}, &child, &execve_errno));
   315    EXPECT_EQ(0, execve_errno);
   316  
   317    int status;
   318    EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
   319                SyscallSucceedsWithValue(child));
   320  
   321    // Not required anymore.
   322    kill.Release();
   323  
   324    EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
   325        << "Wait status: " << status;
   326  }
   327  
   328  }  // namespace
   329  }  // namespace testing
   330  }  // namespace gvisor
   331  
   332  namespace {
   333  void MaskSIGPIPE() {
   334    // Always mask SIGPIPE as it's common and tests aren't expected to handle it.
   335    // We don't take the TestInit() path so we must do this manually.
   336    struct sigaction sa = {};
   337    sa.sa_handler = SIG_IGN;
   338    TEST_CHECK(sigaction(SIGPIPE, &sa, nullptr) == 0);
   339  }
   340  }  // namespace
   341  
   342  int main(int argc, char** argv) {
   343    // These tests require no background threads, so check for their flags before
   344    // calling TestInit().
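          // The ItimerTest cases above re-exec this binary (via ForkAndExec of
          // /proc/self/exe) with one of these flags; the child runs the corresponding
          // Test* function directly and reports the result via its exit status.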
   345    for (int i = 0; i < argc; i++) {
   346      absl::string_view arg(argv[i]);
   347  
   348      if (arg == gvisor::testing::kSIGALRMToMainThread) {
   349        MaskSIGPIPE();
   350        return gvisor::testing::TestSIGALRMToMainThread();
   351      }
   352      if (arg == gvisor::testing::kSIGPROFFairnessActive) {
   353        MaskSIGPIPE();
   354        return gvisor::testing::TestSIGPROFFairness(absl::ZeroDuration());
   355      }
   356      if (arg == gvisor::testing::kSIGPROFFairnessIdle) {
   357        MaskSIGPIPE();
   358        // A sleep time greater than ClockTick (10ms) exercises gVisor's
   359        // kernel.cpuClockTicker while it is sleeping.
   360        return gvisor::testing::TestSIGPROFFairness(absl::Milliseconds(25));
   361      }
   362    }
   363  
   364    gvisor::testing::TestInit(&argc, &argv);
   365    return gvisor::testing::RunAllTests();
   366  }