gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/test/perf/linux/mapping_benchmark.cc

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#include <vector>

#include "gtest/gtest.h"
#include "benchmark/benchmark.h"
#include "test/util/logging.h"
#include "test/util/memory_util.h"
#include "test/util/posix_error.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {

namespace {

// Conservative value for /proc/sys/vm/max_map_count, which limits the number of
// VMAs, minus a safety margin for VMAs that already exist for the test binary.
// The default value for max_map_count is
// include/linux/mm.h:DEFAULT_MAX_MAP_COUNT = 65530.
constexpr size_t kMaxVMAs = 64001;
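
// A hedged, illustrative alternative (not used by the benchmarks below): the
// actual limit could be read from procfs at runtime rather than hard-coded.
// Assumes procfs is mounted at /proc; falls back to the conservative constant
// above on any error.
size_t ReadMaxMapCount() {
  char buf[32] = {};
  const int fd = open("/proc/sys/vm/max_map_count", O_RDONLY);
  if (fd < 0) {
    return kMaxVMAs;
  }
  const ssize_t n = read(fd, buf, sizeof(buf) - 1);
  close(fd);
  if (n <= 0) {
    return kMaxVMAs;
  }
  return static_cast<size_t>(atoll(buf));
}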

// Map then unmap pages without touching them.
void BM_MapUnmap(benchmark::State& state) {
  // Number of pages to map.
  const int pages = state.range(0);

  while (state.KeepRunning()) {
    void* addr = mmap(nullptr, pages * kPageSize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");

    int ret = munmap(addr, pages * kPageSize);
    TEST_CHECK_MSG(ret == 0, "munmap failed");
  }
}

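// Here and below, Range(1, 1 << 17) sweeps the page count from 1 up to 128K
// pages (512 MiB with 4 KiB pages) in powers of the benchmark library's
// default range multiplier of 8. UseRealTime() reports wall-clock rather than
// this thread's CPU time, presumably so that work done outside this process
// (e.g., by the gVisor sentry) still shows up in the measurement.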
BENCHMARK(BM_MapUnmap)->Range(1, 1 << 17)->UseRealTime();

// Map, touch, then unmap pages.
void BM_MapTouchUnmap(benchmark::State& state) {
  // Number of pages to map.
  const int pages = state.range(0);

  while (state.KeepRunning()) {
    void* addr = mmap(nullptr, pages * kPageSize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");

    char* c = reinterpret_cast<char*>(addr);
    char* end = c + pages * kPageSize;
    while (c < end) {
      *c = 42;
      c += kPageSize;
    }

    int ret = munmap(addr, pages * kPageSize);
    TEST_CHECK_MSG(ret == 0, "munmap failed");
  }
}

BENCHMARK(BM_MapTouchUnmap)->Range(1, 1 << 17)->UseRealTime();
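
// A hedged companion sketch (an addition, not part of the original suite):
// MAP_POPULATE asks the kernel to prefault all pages at mmap() time, moving
// the fault cost out of the user-space touch loop above and into the mmap()
// call itself. Assumes MAP_POPULATE is honored for private anonymous
// mappings, as it is on Linux.
void BM_MapPopulateUnmap(benchmark::State& state) {
  // Number of pages to map.
  const int pages = state.range(0);

  while (state.KeepRunning()) {
    void* addr = mmap(nullptr, pages * kPageSize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
    TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");

    int ret = munmap(addr, pages * kPageSize);
    TEST_CHECK_MSG(ret == 0, "munmap failed");
  }
}

BENCHMARK(BM_MapPopulateUnmap)->Range(1, 1 << 17)->UseRealTime();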

// Map and touch many pages, unmapping all at once.
//
// NOTE(b/111429208): This is a regression test to ensure performant mapping and
// allocation even with tons of mappings.
void BM_MapTouchMany(benchmark::State& state) {
  // Number of pages to map.
  const int page_count = state.range(0);

  while (state.KeepRunning()) {
    std::vector<void*> pages;

    for (int i = 0; i < page_count; i++) {
      void* addr = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");

      char* c = reinterpret_cast<char*>(addr);
      *c = 42;

      pages.push_back(addr);
    }

    for (void* addr : pages) {
      int ret = munmap(addr, kPageSize);
      TEST_CHECK_MSG(ret == 0, "munmap failed");
    }
  }

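  // Report throughput: with SetBytesProcessed, the benchmark framework prints
  // a bytes/second rate for each run in addition to the raw timings.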
  state.SetBytesProcessed(kPageSize * page_count * state.iterations());
}

BENCHMARK(BM_MapTouchMany)->Range(1, 1 << 12)->UseRealTime();

void BM_PageFault(benchmark::State& state) {
  // Map the region in which we will take page faults. To ensure that each page
  // fault maps only a single page, each page we touch must correspond to a
  // distinct VMA. Thus we need a 1-page gap between each 1-page VMA. However,
  // each gap consists of a PROT_NONE VMA, instead of an unmapped hole, so that
  // if there are background threads running, they can't inadvertently create
  // mappings in our gaps that would then be unmapped when the test ends.
  size_t test_pages = kMaxVMAs;
  // Ensure that test_pages is odd, since we want the test region to both
  // begin and end with a mapped page.
  if (test_pages % 2 == 0) {
    test_pages--;
  }
  const size_t test_region_bytes = test_pages * kPageSize;
  // Use MAP_SHARED here because madvise(MADV_DONTNEED) on private mappings on
  // gVisor won't force future sentry page faults (by design). Use MAP_POPULATE
  // so that Linux pre-allocates the shmem file used to back the mapping.
  Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(test_region_bytes, PROT_READ, MAP_SHARED | MAP_POPULATE));
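  // Carve every other page out of the region, leaving alternating readable
  // and PROT_NONE pages (R = readable, N = PROT_NONE):
  //   R N R N ... N R
  // so that each page touched below lies in its own single-page VMA.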
  for (size_t i = 0; i < test_pages / 2; i++) {
    ASSERT_THAT(
        mprotect(reinterpret_cast<void*>(m.addr() + ((2 * i + 1) * kPageSize)),
                 kPageSize, PROT_NONE),
        SyscallSucceeds());
  }

  const size_t mapped_pages = test_pages / 2 + 1;
  // "Start" at the end of the mapped region to force the mapped region to be
  // reset, since we mapped it with MAP_POPULATE.
  size_t cur_page = mapped_pages;
  for (auto _ : state) {
    if (cur_page >= mapped_pages) {
      // We've reached the end of our mapped region and have to reset it to
      // incur page faults again.
      state.PauseTiming();
      ASSERT_THAT(madvise(m.ptr(), test_region_bytes, MADV_DONTNEED),
                  SyscallSucceeds());
      cur_page = 0;
      state.ResumeTiming();
    }
    const uintptr_t addr = m.addr() + (2 * cur_page * kPageSize);
    const char c = *reinterpret_cast<volatile char*>(addr);
    benchmark::DoNotOptimize(c);
    cur_page++;
  }
}

BENCHMARK(BM_PageFault)->UseRealTime();

}  // namespace

}  // namespace testing
}  // namespace gvisor