// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <errno.h>
#include <linux/futex.h>
#include <linux/types.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <syscall.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>

#include "gtest/gtest.h"
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "test/util/cleanup.h"
#include "test/util/file_descriptor.h"
#include "test/util/memory_util.h"
#include "test/util/save_util.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"
#include "test/util/thread_util.h"
#include "test/util/time_util.h"
#include "test/util/timer_util.h"

namespace gvisor {
namespace testing {

namespace {

// Amount of time we wait for threads doing futex_wait to start running before
// doing futex_wake.
constexpr auto kWaiterStartupDelay = absl::Seconds(3);

// Default timeout for waiters in tests where we expect a futex_wake to be
// ineffective.
constexpr auto kIneffectiveWakeTimeout = absl::Seconds(6);

static_assert(kWaiterStartupDelay < kIneffectiveWakeTimeout,
              "futex_wait will time out before futex_wake is called");

int futex_wait(bool priv, std::atomic<int>* uaddr, int val,
               absl::Duration timeout = absl::InfiniteDuration()) {
  int op = FUTEX_WAIT;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }

  if (timeout == absl::InfiniteDuration()) {
    return RetryEINTR(syscall)(SYS_futex, uaddr, op, val, nullptr);
  }

  // FUTEX_WAIT doesn't adjust the timeout if it returns EINTR, so we have to
  // do so.
  while (true) {
    auto const timeout_ts = absl::ToTimespec(timeout);
    MonotonicTimer timer;
    timer.Start();
    int const ret = syscall(SYS_futex, uaddr, op, val, &timeout_ts);
    if (ret != -1 || errno != EINTR) {
      return ret;
    }
    timeout = std::max(timeout - timer.Duration(), absl::ZeroDuration());
  }
}
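
// Note: the raw futex(2) syscall takes up to six arguments:
// futex(uaddr, futex_op, val, timeout/val2, uaddr2, val3). The helpers in
// this file pass only the arguments that each operation consumes.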

int futex_wait_bitset(bool priv, std::atomic<int>* uaddr, int val, int bitset,
                      absl::Time deadline = absl::InfiniteFuture()) {
  int op = FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }

  auto const deadline_ts = absl::ToTimespec(deadline);
  return RetryEINTR(syscall)(
      SYS_futex, uaddr, op, val,
      deadline == absl::InfiniteFuture() ? nullptr : &deadline_ts, nullptr,
      bitset);
}

int futex_wake(bool priv, std::atomic<int>* uaddr, int count) {
  int op = FUTEX_WAKE;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  return syscall(SYS_futex, uaddr, op, count);
}

int futex_wake_bitset(bool priv, std::atomic<int>* uaddr, int count,
                      int bitset) {
  int op = FUTEX_WAKE_BITSET;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  return syscall(SYS_futex, uaddr, op, count, nullptr, nullptr, bitset);
}

int futex_wake_op(bool priv, std::atomic<int>* uaddr1, std::atomic<int>* uaddr2,
                  int nwake1, int nwake2, uint32_t sub_op) {
  int op = FUTEX_WAKE_OP;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  return syscall(SYS_futex, uaddr1, op, nwake1, nwake2, uaddr2, sub_op);
}

int futex_lock_pi(bool priv, std::atomic<int>* uaddr) {
  int op = FUTEX_LOCK_PI;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  int zero = 0;
  if (uaddr->compare_exchange_strong(zero, gettid())) {
    return 0;
  }
  return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
}

int futex_trylock_pi(bool priv, std::atomic<int>* uaddr) {
  int op = FUTEX_TRYLOCK_PI;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  int zero = 0;
  if (uaddr->compare_exchange_strong(zero, gettid())) {
    return 0;
  }
  return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
}

int futex_unlock_pi(bool priv, std::atomic<int>* uaddr) {
  int op = FUTEX_UNLOCK_PI;
  if (priv) {
    op |= FUTEX_PRIVATE_FLAG;
  }
  int tid = gettid();
  if (uaddr->compare_exchange_strong(tid, 0)) {
    return 0;
  }
  return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
}
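
// For PI futexes the futex word encodes the lock state: 0 when unlocked,
// otherwise the owner's TID (in FUTEX_TID_MASK), with FUTEX_WAITERS set by
// the kernel once other threads block on the lock. The PI helpers above
// implement the usual userspace fast path (CAS between 0 and gettid()) and
// only enter the kernel on contention.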

// Fixture for futex tests parameterized by whether to use private or shared
// futexes.
class PrivateAndSharedFutexTest : public ::testing::TestWithParam<bool> {
 protected:
  bool IsPrivate() const { return GetParam(); }
  int PrivateFlag() const { return IsPrivate() ? FUTEX_PRIVATE_FLAG : 0; }
};

// FUTEX_WAIT with 0 timeout does not block.
TEST_P(PrivateAndSharedFutexTest, Wait_ZeroTimeout) {
  struct timespec timeout = {};

  // Don't use the futex_wait helper because it adjusts timeout.
  int a = 1;
  EXPECT_THAT(syscall(SYS_futex, &a, FUTEX_WAIT | PrivateFlag(), a, &timeout),
              SyscallFailsWithErrno(ETIMEDOUT));
}

TEST_P(PrivateAndSharedFutexTest, Wait_Timeout) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);

  MonotonicTimer timer;
  timer.Start();
  constexpr absl::Duration kTimeout = absl::Seconds(1);
  EXPECT_THAT(futex_wait(IsPrivate(), &a, a, kTimeout),
              SyscallFailsWithErrno(ETIMEDOUT));
  EXPECT_GE(timer.Duration(), kTimeout);
}

TEST_P(PrivateAndSharedFutexTest, Wait_BitsetTimeout) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);

  MonotonicTimer timer;
  timer.Start();
  constexpr absl::Duration kTimeout = absl::Seconds(1);
  EXPECT_THAT(
      futex_wait_bitset(IsPrivate(), &a, a, 0xffffffff, absl::Now() + kTimeout),
      SyscallFailsWithErrno(ETIMEDOUT));
  EXPECT_GE(timer.Duration(), kTimeout);
}

TEST_P(PrivateAndSharedFutexTest, WaitBitset_NegativeTimeout) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);

  MonotonicTimer timer;
  timer.Start();
  EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, a, 0xffffffff,
                                absl::Now() - absl::Seconds(1)),
              SyscallFailsWithErrno(ETIMEDOUT));
}

TEST_P(PrivateAndSharedFutexTest, Wait_WrongVal) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);
  EXPECT_THAT(futex_wait(IsPrivate(), &a, a + 1),
              SyscallFailsWithErrno(EAGAIN));
}

TEST_P(PrivateAndSharedFutexTest, Wait_ZeroBitset) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);
  EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, a, 0),
              SyscallFailsWithErrno(EINVAL));
}

TEST_P(PrivateAndSharedFutexTest, Wake1) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  // Prevent save/restore from interrupting futex_wait, which will cause it to
  // return EAGAIN instead of the expected result if futex_wait is restarted
  // after we change the value of a below.
  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
                SyscallSucceedsWithValue(0));
  });
  absl::SleepFor(kWaiterStartupDelay);

  // Change a so that if futex_wake happens before futex_wait, the latter
  // returns EAGAIN instead of hanging the test.
  a.fetch_add(1);
  EXPECT_THAT(futex_wake(IsPrivate(), &a, 1), SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, Wake0) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  // Prevent save/restore from interrupting futex_wait, which will cause it to
  // return EAGAIN instead of the expected result if futex_wait is restarted
  // after we change the value of a below.
  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
                SyscallSucceedsWithValue(0));
  });
  absl::SleepFor(kWaiterStartupDelay);

  // Change a so that if futex_wake happens before futex_wait, the latter
  // returns EAGAIN instead of hanging the test.
  a.fetch_add(1);
  // The Linux kernel wakes one waiter even if val is 0 or negative.
  EXPECT_THAT(futex_wake(IsPrivate(), &a, 0), SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, WakeAll) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  constexpr int kThreads = 5;
  std::vector<std::unique_ptr<ScopedThread>> threads;
  threads.reserve(kThreads);
  for (int i = 0; i < kThreads; i++) {
    threads.push_back(absl::make_unique<ScopedThread>([&] {
      EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
                  SyscallSucceeds());
    }));
  }
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake(IsPrivate(), &a, kThreads),
              SyscallSucceedsWithValue(kThreads));
}

TEST_P(PrivateAndSharedFutexTest, WakeSome) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  constexpr int kThreads = 5;
  constexpr int kWokenThreads = 3;
  static_assert(kWokenThreads < kThreads,
                "can't wake more threads than are created");
  std::vector<std::unique_ptr<ScopedThread>> threads;
  threads.reserve(kThreads);
  std::vector<int> rets;
  rets.reserve(kThreads);
  std::vector<int> errs;
  errs.reserve(kThreads);
  for (int i = 0; i < kThreads; i++) {
    rets.push_back(-1);
    errs.push_back(0);
  }
  for (int i = 0; i < kThreads; i++) {
    threads.push_back(absl::make_unique<ScopedThread>([&, i] {
      rets[i] =
          futex_wait(IsPrivate(), &a, kInitialValue, kIneffectiveWakeTimeout);
      errs[i] = errno;
    }));
  }
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake(IsPrivate(), &a, kWokenThreads),
              SyscallSucceedsWithValue(kWokenThreads));

  int woken = 0;
  int timedout = 0;
  for (int i = 0; i < kThreads; i++) {
    threads[i]->Join();
    if (rets[i] == 0) {
      woken++;
    } else if (errs[i] == ETIMEDOUT) {
      timedout++;
    } else {
      ADD_FAILURE() << " thread " << i << ": returned " << rets[i]
                    << ", errno " << errs[i];
    }
  }
  EXPECT_EQ(woken, kWokenThreads);
  EXPECT_EQ(timedout, kThreads - kWokenThreads);
}
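
// FUTEX_WAIT_BITSET waiters register a 32-bit mask, and FUTEX_WAKE_BITSET
// only wakes waiters whose mask intersects the wake mask. A plain FUTEX_WAIT
// waiter is treated as using FUTEX_BITSET_MATCH_ANY and a plain FUTEX_WAKE as
// waking with an all-ones mask, so mixing the plain and bitset variants still
// wakes the waiter. The tests below exercise both directions.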

TEST_P(PrivateAndSharedFutexTest, WaitBitset_Wake) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, 0b01001000),
                SyscallSucceeds());
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake(IsPrivate(), &a, 1), SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, Wait_WakeBitset) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, 0b01001000),
              SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, WaitBitset_WakeBitsetMatch) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  constexpr int kBitset = 0b01001000;

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, kBitset),
                SyscallSucceeds());
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, kBitset),
              SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, WaitBitset_WakeBitsetNoMatch) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  constexpr int kWaitBitset = 0b01000001;
  constexpr int kWakeBitset = 0b00101000;
  static_assert((kWaitBitset & kWakeBitset) == 0,
                "futex_wake_bitset will wake waiter");

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, kWaitBitset,
                                  absl::Now() + kIneffectiveWakeTimeout),
                SyscallFailsWithErrno(ETIMEDOUT));
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, kWakeBitset),
              SyscallSucceedsWithValue(0));
}
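
// FUTEX_WAKE_OP atomically applies the operation encoded by FUTEX_OP() to
// *uaddr2, wakes up to nwake1 waiters on uaddr1, and additionally wakes up to
// nwake2 waiters on uaddr2 if the old value of *uaddr2 satisfies the encoded
// comparison.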

TEST_P(PrivateAndSharedFutexTest, WakeOpCondSuccess) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
  std::atomic<int> b = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  ScopedThread thread_a([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
  });
  ScopedThread thread_b([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &b, kInitialValue), SyscallSucceeds());
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  b.fetch_add(1);
  // This futex_wake_op should:
  //   - Wake 1 waiter on a unconditionally.
  //   - Wake 1 waiter on b if b == kInitialValue + 1, which it is.
  //   - Do "b += 1".
  EXPECT_THAT(futex_wake_op(IsPrivate(), &a, &b, 1, 1,
                            FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ,
                                     (kInitialValue + 1))),
              SyscallSucceedsWithValue(2));
  EXPECT_EQ(b, kInitialValue + 2);
}

TEST_P(PrivateAndSharedFutexTest, WakeOpCondFailure) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
  std::atomic<int> b = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  ScopedThread thread_a([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
  });
  ScopedThread thread_b([&] {
    EXPECT_THAT(
        futex_wait(IsPrivate(), &b, kInitialValue, kIneffectiveWakeTimeout),
        SyscallFailsWithErrno(ETIMEDOUT));
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  b.fetch_add(1);
  // This futex_wake_op should:
  //   - Wake 1 waiter on a unconditionally.
  //   - Wake 1 waiter on b if b == kInitialValue - 1, which it isn't.
  //   - Do "b += 1" regardless.
  EXPECT_THAT(futex_wake_op(IsPrivate(), &a, &b, 1, 1,
                            FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ,
                                     (kInitialValue - 1))),
              SyscallSucceedsWithValue(1));
  EXPECT_EQ(b, kInitialValue + 2);
}

TEST_P(PrivateAndSharedFutexTest, NoWakeInterprocessPrivateAnon) {
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
  constexpr int kInitialValue = 1;
  ptr->store(kInitialValue);

  DisableSave ds;
  pid_t const child_pid = fork();
  if (child_pid == 0) {
    TEST_PCHECK(futex_wait(IsPrivate(), ptr, kInitialValue,
                           kIneffectiveWakeTimeout) == -1 &&
                errno == ETIMEDOUT);
    _exit(0);
  }
  ASSERT_THAT(child_pid, SyscallSucceeds());
  absl::SleepFor(kWaiterStartupDelay);

  EXPECT_THAT(futex_wake(IsPrivate(), ptr, 1), SyscallSucceedsWithValue(0));

  int status;
  ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
              SyscallSucceedsWithValue(child_pid));
  EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
      << " status " << status;
}

TEST_P(PrivateAndSharedFutexTest, WakeAfterCOWBreak) {
  // Use a futex on a non-stack mapping so we can be sure that the child
  // process below isn't the one that breaks copy-on-write.
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
  constexpr int kInitialValue = 1;
  ptr->store(kInitialValue);

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(futex_wait(IsPrivate(), ptr, kInitialValue), SyscallSucceeds());
  });
  absl::SleepFor(kWaiterStartupDelay);

  pid_t const child_pid = fork();
  if (child_pid == 0) {
    // Wait to be killed by the parent.
    while (true) pause();
  }
  ASSERT_THAT(child_pid, SyscallSucceeds());
  auto cleanup_child = Cleanup([&] {
    EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
    int status;
    ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
                SyscallSucceedsWithValue(child_pid));
    EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
        << " status " << status;
  });

  // In addition to preventing a late futex_wait from sleeping, this breaks
  // copy-on-write on the mapped page.
  ptr->fetch_add(1);
  EXPECT_THAT(futex_wake(IsPrivate(), ptr, 1), SyscallSucceedsWithValue(1));
}

TEST_P(PrivateAndSharedFutexTest, WakeWrongKind) {
  constexpr int kInitialValue = 1;
  std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);

  DisableSave ds;
  ScopedThread thread([&] {
    EXPECT_THAT(
        futex_wait(IsPrivate(), &a, kInitialValue, kIneffectiveWakeTimeout),
        SyscallFailsWithErrno(ETIMEDOUT));
  });
  absl::SleepFor(kWaiterStartupDelay);

  a.fetch_add(1);
  // The value of priv passed to futex_wake is the opposite of that passed to
  // futex_wait; we expect this not to wake the waiter.
  EXPECT_THAT(futex_wake(!IsPrivate(), &a, 1), SyscallSucceedsWithValue(0));
}

INSTANTIATE_TEST_SUITE_P(SharedPrivate, PrivateAndSharedFutexTest,
                         ::testing::Bool());

// Passing null as the address only works for private futexes.
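
// The WakeOp0* tests below wake zero waiters and use FUTEX_WAKE_OP purely for
// its atomic read-modify-write of the second futex word, exercising each
// FUTEX_OP_* operation in turn.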

TEST(PrivateFutexTest, WakeOp0Set) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);

  int futex_op = FUTEX_OP(FUTEX_OP_SET, 2, 0, 0);
  EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(a, 2);
}

TEST(PrivateFutexTest, WakeOp0Add) {
  std::atomic<int> a = ATOMIC_VAR_INIT(1);
  int futex_op = FUTEX_OP(FUTEX_OP_ADD, 1, 0, 0);
  EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(a, 2);
}

TEST(PrivateFutexTest, WakeOp0Or) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0b01);
  int futex_op = FUTEX_OP(FUTEX_OP_OR, 0b10, 0, 0);
  EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(a, 0b11);
}

TEST(PrivateFutexTest, WakeOp0Andn) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0b11);
  int futex_op = FUTEX_OP(FUTEX_OP_ANDN, 0b10, 0, 0);
  EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(a, 0b01);
}

TEST(PrivateFutexTest, WakeOp0Xor) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0b1010);
  int futex_op = FUTEX_OP(FUTEX_OP_XOR, 0b1100, 0, 0);
  EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(a, 0b0110);
}

TEST(SharedFutexTest, WakeInterprocessSharedAnon) {
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
  auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
  constexpr int kInitialValue = 1;
  ptr->store(kInitialValue);

  DisableSave ds;
  pid_t const child_pid = fork();
  if (child_pid == 0) {
    TEST_PCHECK(futex_wait(false, ptr, kInitialValue) == 0);
    _exit(0);
  }
  ASSERT_THAT(child_pid, SyscallSucceeds());
  auto kill_child = Cleanup(
      [&] { EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds()); });
  absl::SleepFor(kWaiterStartupDelay);

  ptr->fetch_add(1);
  // This is an ASSERT so that if it fails, we immediately abort the test (and
  // kill the subprocess).
  ASSERT_THAT(futex_wake(false, ptr, 1), SyscallSucceedsWithValue(1));

  kill_child.Release();
  int status;
  ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
              SyscallSucceedsWithValue(child_pid));
  EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
      << " status " << status;
}
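
// Same as WakeInterprocessSharedAnon, but the MAP_SHARED mapping is backed by
// a temporary file rather than anonymous memory.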

TEST(SharedFutexTest, WakeInterprocessFile) {
  auto const file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
  ASSERT_THAT(truncate(file.path().c_str(), kPageSize), SyscallSucceeds());
  auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
      nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
  auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
  constexpr int kInitialValue = 1;
  ptr->store(kInitialValue);

  DisableSave ds;
  pid_t const child_pid = fork();
  if (child_pid == 0) {
    TEST_PCHECK(futex_wait(false, ptr, kInitialValue) == 0);
    _exit(0);
  }
  ASSERT_THAT(child_pid, SyscallSucceeds());
  auto kill_child = Cleanup(
      [&] { EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds()); });
  absl::SleepFor(kWaiterStartupDelay);

  ptr->fetch_add(1);
  // This is an ASSERT so that if it fails, we immediately abort the test (and
  // kill the subprocess).
  ASSERT_THAT(futex_wake(false, ptr, 1), SyscallSucceedsWithValue(1));

  kill_child.Release();
  int status;
  ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
              SyscallSucceedsWithValue(child_pid));
  EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
      << " status " << status;
}

TEST_P(PrivateAndSharedFutexTest, PIBasic) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0);

  ASSERT_THAT(futex_lock_pi(IsPrivate(), &a), SyscallSucceeds());
  EXPECT_EQ(a.load(), gettid());
  EXPECT_THAT(futex_lock_pi(IsPrivate(), &a), SyscallFailsWithErrno(EDEADLK));

  ASSERT_THAT(futex_unlock_pi(IsPrivate(), &a), SyscallSucceeds());
  EXPECT_EQ(a.load(), 0);
  EXPECT_THAT(futex_unlock_pi(IsPrivate(), &a), SyscallFailsWithErrno(EPERM));
}

TEST_P(PrivateAndSharedFutexTest, PIConcurrency) {
  DisableSave ds;  // Too many syscalls.

  std::atomic<int> a = ATOMIC_VAR_INIT(0);
  const bool is_priv = IsPrivate();

  std::unique_ptr<ScopedThread> threads[100];
  for (size_t i = 0; i < ABSL_ARRAYSIZE(threads); ++i) {
    threads[i] = absl::make_unique<ScopedThread>([is_priv, &a] {
      for (size_t j = 0; j < 10; ++j) {
        ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
        EXPECT_EQ(a.load() & FUTEX_TID_MASK, gettid());
        SleepSafe(absl::Milliseconds(5));
        ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
      }
    });
  }
}

TEST_P(PrivateAndSharedFutexTest, PIWaiters) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0);
  const bool is_priv = IsPrivate();

  ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
  EXPECT_EQ(a.load(), gettid());

  ScopedThread th([is_priv, &a] {
    ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
    ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
  });

  // Wait until the thread blocks on the futex, setting the waiters bit.
  auto start = absl::Now();
  while (a.load() != (FUTEX_WAITERS | gettid())) {
    ASSERT_LT(absl::Now() - start, absl::Seconds(5));
    absl::SleepFor(absl::Milliseconds(100));
  }
  ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
}

TEST_P(PrivateAndSharedFutexTest, PITryLock) {
  std::atomic<int> a = ATOMIC_VAR_INIT(0);
  const bool is_priv = IsPrivate();

  ASSERT_THAT(futex_trylock_pi(is_priv, &a), SyscallSucceeds());
  EXPECT_EQ(a.load(), gettid());

  EXPECT_THAT(futex_trylock_pi(is_priv, &a), SyscallFailsWithErrno(EDEADLK));
  ScopedThread th([is_priv, &a] {
    EXPECT_THAT(futex_trylock_pi(is_priv, &a), SyscallFailsWithErrno(EAGAIN));
  });
  th.Join();

  ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
}

TEST_P(PrivateAndSharedFutexTest, PITryLockConcurrency) {
  DisableSave ds;  // Too many syscalls.

  std::atomic<int> a = ATOMIC_VAR_INIT(0);
  const bool is_priv = IsPrivate();

  std::unique_ptr<ScopedThread> threads[10];
  for (size_t i = 0; i < ABSL_ARRAYSIZE(threads); ++i) {
    threads[i] = absl::make_unique<ScopedThread>([is_priv, &a] {
      for (size_t j = 0; j < 10;) {
        if (futex_trylock_pi(is_priv, &a) == 0) {
          ++j;
          EXPECT_EQ(a.load() & FUTEX_TID_MASK, gettid());
          SleepSafe(absl::Milliseconds(5));
          ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
        }
      }
    });
  }
}
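
// Robust futex tests. set_robust_list(2) registers a per-thread list of held
// robust futexes; on thread exit the kernel walks the list, marks each futex
// word with FUTEX_OWNER_DIED and wakes a waiter, which is how robust pthread
// mutexes report EOWNERDEAD below.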

int get_robust_list(int pid, struct robust_list_head** head_ptr,
                    size_t* len_ptr) {
  return syscall(__NR_get_robust_list, pid, head_ptr, len_ptr);
}

int set_robust_list(struct robust_list_head* head, size_t len) {
  return syscall(__NR_set_robust_list, head, len);
}

TEST(RobustFutexTest, BasicSetGet) {
  struct robust_list_head hd = {};
  struct robust_list_head* hd_ptr = &hd;

  // Set!
  EXPECT_THAT(set_robust_list(hd_ptr, sizeof(hd)), SyscallSucceedsWithValue(0));

  // Get!
  struct robust_list_head* new_hd_ptr = hd_ptr;
  size_t len;
  EXPECT_THAT(get_robust_list(0, &new_hd_ptr, &len),
              SyscallSucceedsWithValue(0));
  EXPECT_EQ(new_hd_ptr, hd_ptr);
  EXPECT_EQ(len, sizeof(hd));
}

TEST(RobustFutexTest, GetFromOtherTid) {
  // Get the current tid and list head.
  pid_t tid = gettid();
  struct robust_list_head* hd_ptr = {};
  size_t len;
  EXPECT_THAT(get_robust_list(0, &hd_ptr, &len), SyscallSucceedsWithValue(0));

  // Create a new thread.
  ScopedThread t([&] {
    // The new thread's list head should be different from the parent's.
    struct robust_list_head* got_hd_ptr = {};
    EXPECT_THAT(get_robust_list(0, &got_hd_ptr, &len),
                SyscallSucceedsWithValue(0));
    EXPECT_NE(hd_ptr, got_hd_ptr);

    // Get the parent's list head by passing its tid.
    EXPECT_THAT(get_robust_list(tid, &got_hd_ptr, &len),
                SyscallSucceedsWithValue(0));
    EXPECT_EQ(hd_ptr, got_hd_ptr);
  });

  // Wait for thread.
  t.Join();
}

TEST(RobustFutexTest, InvalidSize) {
  struct robust_list_head* hd = {};
  EXPECT_THAT(set_robust_list(hd, sizeof(*hd) + 1),
              SyscallFailsWithErrno(EINVAL));
}

TEST(RobustFutexTest, PthreadMutexAttr) {
  constexpr int kNumMutexes = 3;

  // Create a bunch of robust mutexes.
  pthread_mutexattr_t attrs[kNumMutexes];
  pthread_mutex_t mtxs[kNumMutexes];
  for (int i = 0; i < kNumMutexes; i++) {
    TEST_PCHECK(pthread_mutexattr_init(&attrs[i]) == 0);
    TEST_PCHECK(pthread_mutexattr_setrobust(&attrs[i], PTHREAD_MUTEX_ROBUST) ==
                0);
    TEST_PCHECK(pthread_mutex_init(&mtxs[i], &attrs[i]) == 0);
  }

  // Start a thread that locks the mutexes and then exits.
  ScopedThread t([&] {
    for (int i = 0; i < kNumMutexes; i++) {
      TEST_PCHECK(pthread_mutex_lock(&mtxs[i]) == 0);
    }
    pthread_exit(NULL);
  });

  // Wait for thread.
  t.Join();

  // Now try to take the mutexes.
  for (int i = 0; i < kNumMutexes; i++) {
    // Should get EOWNERDEAD.
    EXPECT_EQ(pthread_mutex_lock(&mtxs[i]), EOWNERDEAD);
    // Make the mutex consistent.
    EXPECT_EQ(pthread_mutex_consistent(&mtxs[i]), 0);
    // Unlock.
    EXPECT_EQ(pthread_mutex_unlock(&mtxs[i]), 0);
  }
}

}  // namespace
}  // namespace testing
}  // namespace gvisor