github.com/google/syzkaller@v0.0.0-20251211124644-a066d2bc4b02/executor/executor_test.h (about)

     1  // Copyright 2018 syzkaller project authors. All rights reserved.
     2  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     3  
     4  #include <stdlib.h>
     5  #include <sys/mman.h>
     6  #include <unistd.h>
     7  
     8  #ifdef __linux__
     9  #include <sys/prctl.h>
    10  #endif
    11  
// Fake kernel text address range for the test OS: PCs reported by the
// coverage callback are folded into this range so is_kernel_pc accepts them.
// Note: sys/targets also knows about these consts; keep them in sync.
static uint64 kernel_text_start = 0xc0dec0dec0000000;
static uint64 kernel_text_mask = 0xffffff;
    15  
    16  static void os_init(int argc, char** argv, void* data, size_t data_size)
    17  {
    18  #ifdef __linux__
    19  	prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
    20  	// There's a risk that the parent has exited before we get to call prctl().
    21  	// In that case, let's assume that the child must have been reassigned to PID=1.
    22  	if (getppid() == 1)
    23  		exitf("the parent process was killed");
    24  #endif
    25  	void* got = mmap(data, data_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | MAP_FIXED_EXCLUSIVE, -1, 0);
    26  	if (data != got)
    27  		failmsg("mmap of data segment failed", "want %p, got %p", data, got);
    28  	is_kernel_64_bit = sizeof(unsigned long) == 8;
    29  }
    30  
// "notrace" marks __sanitizer_cov_trace_pc itself as exempt from coverage
// instrumentation — otherwise the callback would recurse into itself.
// NOTE(review): presumably clang does not need (or support) this attribute
// here, hence the empty definition — confirm against clang docs.
#ifdef __clang__
#define notrace
#else
#define notrace __attribute__((no_sanitize_coverage))
#endif
    36  
// Compiler-inserted coverage callback: appends the caller's PC (remapped into
// the fake kernel text range) to the current thread's coverage buffer,
// emulating kernel KCOV output for the test OS.
extern "C" notrace void __sanitizer_cov_trace_pc(void)
{
	// Skip when no coverage buffer is active, or when comparison collection
	// was requested (this callback only produces PCs).
	if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
		return;
	uint64 pc = (uint64)__builtin_return_address(0);
	// Convert to what is_kernel_pc will accept as valid coverage.
	pc = kernel_text_start | (pc & kernel_text_mask);
	// Note: we duplicate the following code instead of using a template function
	// because it must not be instrumented which is hard to achieve for all compilers
	// if the code is in a separate function.
	if (is_kernel_64_bit) {
		// Buffer layout (KCOV-style): word 0 is the PC count, PCs follow.
		uint64* start = (uint64*)current_thread->cov.data;
		uint64* end = (uint64*)current_thread->cov.data_end;
		uint64 pos = start[0];
		// Only append while there is room for one more PC.
		if (start + pos + 1 < end) {
			start[0] = pos + 1;
			start[pos + 1] = pc;
		}
	} else {
		uint32* start = (uint32*)current_thread->cov.data;
		uint32* end = (uint32*)current_thread->cov.data_end;
		uint32 pos = start[0];
		if (start + pos + 1 < end) {
			start[0] = pos + 1;
			start[pos + 1] = pc;
		}
	}
}
    65  
// Invokes the syscall implementation c->call with up to kMaxArgs arguments
// and returns its result.
static intptr_t execute_syscall(const call_t* c, intptr_t a[kMaxArgs])
{
	// Inject coverage PC even when built w/o coverage instrumentation.
	// This allows to pass machine check with coverage enabled.
	// pkg/fuzzer tests with coverage instrumentation shouldn't be distracted by the additional PC,
	// and syz_inject_cover overwrites the whole array so will remove it.
	__sanitizer_cov_trace_pc();
	return c->call(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
}
    75  
    76  static void cover_open(cover_t* cov, bool extra)
    77  {
    78  	cov->data_size = kCoverSize * sizeof(unsigned long);
    79  }
    80  
// Records whether this collection should gather comparison operands instead
// of PCs (the PC callback above bails out when collect_comps is set).
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
	cov->collect_comps = collect_comps;
}
    85  
    86  static void cover_reset(cover_t* cov)
    87  {
    88  	*(uint64*)(cov->data) = 0;
    89  }
    90  
    91  static void cover_collect(cover_t* cov)
    92  {
    93  	if (is_kernel_64_bit)
    94  		cov->size = *(uint64*)cov->data;
    95  	else
    96  		cov->size = *(uint32*)cov->data;
    97  }
    98  
// No-op for the test OS: the coverage buffer is plain anonymous memory
// and needs no protection between executions.
static void cover_protect(cover_t* cov)
{
}
   102  
   103  static void cover_mmap(cover_t* cov)
   104  {
   105  	if (cov->mmap_alloc_ptr != NULL)
   106  		fail("cover_mmap invoked on an already mmapped cover_t object");
   107  	if (cov->data_size == 0)
   108  		fail("cover_t structure is corrupted");
   109  	cov->mmap_alloc_size = cov->data_size;
   110  	cov->mmap_alloc_ptr = (char*)mmap(NULL, cov->mmap_alloc_size,
   111  					  PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
   112  	if (cov->mmap_alloc_ptr == MAP_FAILED)
   113  		exitf("cover mmap failed");
   114  	cov->data = cov->mmap_alloc_ptr;
   115  	cov->data_end = cov->data + cov->mmap_alloc_size;
   116  	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
   117  	// We don't care about the specific PC values for now.
   118  	// Once we do, we might want to consider ASLR here.
   119  	cov->pc_offset = 0;
   120  }
   121  
// No-op for the test OS: counterpart of cover_protect, which is also a no-op.
static void cover_unprotect(cover_t* cov)
{
}
   125  
   126  static long inject_cover(cover_t* cov, long a, long b)
   127  {
   128  	if (cov->data == nullptr)
   129  		return ENOENT;
   130  	uint32 size = std::min((uint32)b, cov->data_size);
   131  	memcpy(cov->data, (void*)a, size);
   132  	memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->data_size - size));
   133  	return 0;
   134  }
   135  
   136  static long syz_inject_cover(volatile long a, volatile long b)
   137  {
   138  	return inject_cover(&current_thread->cov, a, b);
   139  }
   140  
   141  static long syz_inject_remote_cover(volatile long a, volatile long b)
   142  {
   143  	return inject_cover(&extra_cov, a, b);
   144  }
   145  
// Fault injection setup: returning nullptr reports the feature as supported.
static const char* setup_fault()
{
	return nullptr;
}
   150  
// Leak detection setup: returning a non-null message reports the feature
// as unsupported with that reason.
static const char* setup_leak()
{
	return "leak detection is not supported";
}
   155  
// Test various ways how feature setup can fail.
// We don't care about these features for test OS,
// this is just to test the feature support detection code.
#define SYZ_HAVE_FEATURES 1
static feature_t features[] = {
    {rpc::Feature::Fault, setup_fault}, // setup succeeds (returns nullptr)
    {rpc::Feature::Leak, setup_leak}, // setup fails with an error message
};