gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/test/syscalls/linux/mlock.cc

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>

#include "gmock/gmock.h"
#include "test/util/capability_util.h"
#include "test/util/cleanup.h"
#include "test/util/memory_util.h"
#include "test/util/multiprocess_util.h"
#include "test/util/rlimit_util.h"
#include "test/util/test_util.h"

using ::testing::_;

namespace gvisor {
namespace testing {

namespace {

// Returns true if the caller is allowed to mlock() pages: either the
// RLIMIT_MEMLOCK soft limit is nonzero, or the caller holds CAP_IPC_LOCK.
PosixErrorOr<bool> CanMlock() {
  struct rlimit rlim;
  if (getrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
    return PosixError(errno, "getrlimit(RLIMIT_MEMLOCK)");
  }
  if (rlim.rlim_cur != 0) {
    return true;
  }
  return HaveCapability(CAP_IPC_LOCK);
}

// Returns true if the page containing addr is mlocked.
bool IsPageMlocked(uintptr_t addr) {
  // This relies on msync(MS_INVALIDATE) failing with EBUSY for mlocked pages,
  // which is tested for by the MsyncInvalidate case below.
  int const rv = msync(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),
                       kPageSize, MS_ASYNC | MS_INVALIDATE);
  if (rv == 0) {
    return false;
  }
  // This uses TEST_PCHECK_MSG since it's used in subprocesses.
  TEST_PCHECK_MSG(errno == EBUSY, "msync failed with unexpected errno");
  return true;
}
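
// Editorial sketch, not one of the original helpers: mincore(2) reports
// whether the page containing addr is resident. Residency is not the same
// thing as being mlocked, but mlock() without MLOCK_ONFAULT is documented to
// populate the locked range, so this can corroborate the msync()-based probe
// above. It is only referenced by the disabled MLOCK_ONFAULT sketch further
// down, hence [[maybe_unused]].
[[maybe_unused]] bool IsPageResident(uintptr_t addr) {
  unsigned char vec = 0;
  // This uses TEST_PCHECK_MSG since it may be used in subprocesses.
  TEST_PCHECK_MSG(mincore(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),
                          kPageSize, &vec) == 0,
                  "mincore failed with unexpected errno");
  return (vec & 1) != 0;
}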

TEST(MlockTest, Basic) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
}

TEST(MlockTest, ProtNone) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping =
      ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
              SyscallFailsWithErrno(ENOMEM));
  // ENOMEM is returned because mlock can't populate the page, but it's still
  // considered locked.
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
}

TEST(MlockTest, MadviseDontneed) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_THAT(madvise(mapping.ptr(), mapping.len(), MADV_DONTNEED),
              SyscallFailsWithErrno(EINVAL));
}

TEST(MlockTest, MsyncInvalidate) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_THAT(msync(mapping.ptr(), mapping.len(), MS_ASYNC | MS_INVALIDATE),
              SyscallFailsWithErrno(EBUSY));
  EXPECT_THAT(msync(mapping.ptr(), mapping.len(), MS_SYNC | MS_INVALIDATE),
              SyscallFailsWithErrno(EBUSY));
}

TEST(MlockTest, Fork) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
  EXPECT_THAT(
      InForkedProcess([&] { TEST_CHECK(!IsPageMlocked(mapping.addr())); }),
      IsPosixErrorOkAndHolds(0));
}

TEST(MlockTest, RlimitMemlockZero) {
  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
              SyscallFailsWithErrno(EPERM));
}

TEST(MlockTest, RlimitMemlockInsufficient) {
  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, kPageSize));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
              SyscallFailsWithErrno(ENOMEM));
}

TEST(MunlockTest, Basic) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
}

TEST(MunlockTest, NotLocked) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  EXPECT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
}

// There is currently no test for mlockall(MCL_CURRENT) because the default
// RLIMIT_MEMLOCK of 64 KB is too small to lock all of the test process's
// existing mappings, so mlockall(MCL_CURRENT) cannot succeed.
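
// Editorial sketch only, not part of the original suite: with CAP_IPC_LOCK
// (which exempts the caller from RLIMIT_MEMLOCK), an mlockall(MCL_CURRENT)
// test might look roughly like the following. It is disabled by default
// because locking every existing mapping of the forked test process may be
// expensive; whether that is acceptable in a given environment is an
// assumption, not something this file establishes.
TEST(MlockallTest, DISABLED_Current) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_IPC_LOCK)));
  // Run in a forked, single-threaded subprocess for the same reason as the
  // Future test below.
  auto const do_test = [] {
    auto const mapping =
        MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
    TEST_CHECK(!IsPageMlocked(mapping.addr()));
    TEST_PCHECK(mlockall(MCL_CURRENT) == 0);
    Cleanup do_munlockall([] { TEST_PCHECK(munlockall() == 0); });
    // MCL_CURRENT should have locked the pre-existing mapping.
    TEST_CHECK(IsPageMlocked(mapping.addr()));
  };
  EXPECT_THAT(InForkedProcess(do_test), IsPosixErrorOkAndHolds(0));
}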

TEST(MlockallTest, Future) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));

  // Run this test in a separate (single-threaded) subprocess to ensure that a
  // background thread doesn't try to mmap a large amount of memory, fail due
  // to hitting RLIMIT_MEMLOCK, and explode the process violently.
  auto const do_test = [] {
    auto const mapping =
        MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
    TEST_CHECK(!IsPageMlocked(mapping.addr()));
    TEST_PCHECK(mlockall(MCL_FUTURE) == 0);
    // Ensure that mlockall(MCL_FUTURE) is turned off before the end of the
    // test, as otherwise mmaps may fail unexpectedly.
    Cleanup do_munlockall([] { TEST_PCHECK(munlockall() == 0); });
    auto const mapping2 =
        MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
    TEST_CHECK(IsPageMlocked(mapping2.addr()));
    // Fire munlockall() and check that it disables mlockall(MCL_FUTURE).
    do_munlockall.Release()();
    auto const mapping3 =
        MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
    TEST_CHECK(!IsPageMlocked(mapping3.addr()));
  };
  EXPECT_THAT(InForkedProcess(do_test), IsPosixErrorOkAndHolds(0));
}

TEST(MunlockallTest, Basic) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(munlockall(), SyscallSucceeds());
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
}

#ifndef SYS_mlock2
#if defined(__x86_64__)
#define SYS_mlock2 325
#elif defined(__aarch64__)
#define SYS_mlock2 284
#endif
#endif

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01  // Linux: include/uapi/asm-generic/mman-common.h
#endif

#ifdef SYS_mlock2

// mlock2(2) was added in Linux 4.4 and not all libc versions provide a
// wrapper, so invoke it directly via syscall(2).
int mlock2(void const* addr, size_t len, int flags) {
  return syscall(SYS_mlock2, addr, len, flags);
}

TEST(Mlock2Test, NoFlags) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock2(mapping.ptr(), mapping.len(), 0), SyscallSucceeds());
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
}

TEST(Mlock2Test, MlockOnfault) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
  ASSERT_THAT(mlock2(mapping.ptr(), mapping.len(), MLOCK_ONFAULT),
              SyscallSucceeds());
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
}

TEST(Mlock2Test, UnknownFlags) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_THAT(mlock2(mapping.ptr(), mapping.len(), ~0),
              SyscallFailsWithErrno(EINVAL));
}
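
// Editorial sketch only, not part of the original suite: MLOCK_ONFAULT locks
// pages as they are faulted in rather than populating the range up front, so
// a freshly locked page is expected not to be resident until it is touched.
// This assumes mincore(2) reports residency here the same way it does on
// Linux, so the test is disabled by default.
TEST(Mlock2Test, DISABLED_MlockOnfaultDoesNotPrefault) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  ASSERT_THAT(mlock2(mapping.ptr(), mapping.len(), MLOCK_ONFAULT),
              SyscallSucceeds());
  // The range is locked, but nothing has been faulted in yet.
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
  EXPECT_FALSE(IsPageResident(mapping.addr()));
  // Touching the page faults it in; MLOCK_ONFAULT should then keep it
  // resident.
  *static_cast<volatile char*>(mapping.ptr()) = 1;
  EXPECT_TRUE(IsPageResident(mapping.addr()));
}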

#endif  // defined(SYS_mlock2)

TEST(MapLockedTest, Basic) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));
  EXPECT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
  EXPECT_FALSE(IsPageMlocked(mapping.addr()));
}

TEST(MapLockedTest, RlimitMemlockZero) {
  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
  EXPECT_THAT(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED),
      PosixErrorIs(EPERM, _));
}

TEST(MapLockedTest, RlimitMemlockInsufficient) {
  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, kPageSize));
  EXPECT_THAT(
      MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED),
      PosixErrorIs(EAGAIN, _));
}

TEST(MremapLockedTest, Basic) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));

  void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
                      MREMAP_MAYMOVE, nullptr);
  if (addr == MAP_FAILED) {
    FAIL() << "mremap failed: " << errno << " (" << strerror(errno) << ")";
  }
  mapping.release();
  mapping.reset(addr, 2 * mapping.len());
  EXPECT_TRUE(IsPageMlocked(reinterpret_cast<uintptr_t>(addr)));
}

TEST(MremapLockedTest, RlimitMemlockZero) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));

  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
  void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
                      MREMAP_MAYMOVE, nullptr);
  EXPECT_TRUE(addr == MAP_FAILED && errno == EAGAIN)
      << "addr = " << addr << ", errno = " << errno;
}

TEST(MremapLockedTest, RlimitMemlockInsufficient) {
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
  auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
  EXPECT_TRUE(IsPageMlocked(mapping.addr()));

  AutoCapability cap(CAP_IPC_LOCK, false);
  Cleanup reset_rlimit = ASSERT_NO_ERRNO_AND_VALUE(
      ScopedSetSoftRlimit(RLIMIT_MEMLOCK, mapping.len()));
  void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
                      MREMAP_MAYMOVE, nullptr);
  EXPECT_TRUE(addr == MAP_FAILED && errno == EAGAIN)
      << "addr = " << addr << ", errno = " << errno;
}

}  // namespace

}  // namespace testing
}  // namespace gvisor