github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/executor/executor_darwin.h (about)

     1  // Copyright 2021 syzkaller project authors. All rights reserved.
     2  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     3  
     4  #include <math.h>
     5  #include <sys/ioctl.h>
     6  #include <sys/mman.h>
     7  
     8  // FIXME(HerrSpace): As executor is written in C++, we need to make this patch:
     9  // -struct ksancov_trace *trace = (void *)mc.ptr; into
    10  // +struct ksancov_trace *trace = (ksancov_trace *)mc.ptr;
    11  // twice to make this header compile. This used to be C++ friendly in Catalina,
    12  // but was broken in xnu source drop 7195.50.7.100.1.
    13  #include <ksancov.h>
    14  
    15  static void os_init(int argc, char** argv, void* data, size_t data_size)
    16  {
    17  	// Note: We use is_kernel_64_bit in executor.cc to decide which PC pointer
    18  	// size to expect. However in KSANCOV we always get back 32bit pointers,
    19  	// which then get reconstructed to 64bit pointers by adding a fixed offset.
    20  	is_kernel_64_bit = false;
    21  
    22  	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    23  	int flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED;
    24  
    25  	void* got = mmap(data, data_size, prot, flags, -1, 0);
    26  	if (data != got)
    27  		failmsg("mmap of data segment failed", "want %p, got %p", data, got);
    28  
    29  	// Makes sure the file descriptor limit is sufficient to map control pipes.
    30  	struct rlimit rlim;
    31  	rlim.rlim_cur = rlim.rlim_max = kMaxFd;
    32  	setrlimit(RLIMIT_NOFILE, &rlim);
    33  }
    34  
    35  static intptr_t execute_syscall(const call_t* c, intptr_t a[kMaxArgs])
    36  {
    37  	if (c->call)
    38  		return c->call(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
    39  
    40  	return __syscall(c->sys_nr, a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
    41  }
    42  
    43  static void cover_open(cover_t* cov, bool extra)
    44  {
    45  	int fd = ksancov_open();
    46  	if (fd == -1)
    47  		fail("open of /dev/ksancov failed");
    48  	if (dup2(fd, cov->fd) < 0)
    49  		failmsg("failed to dup cover fd", "from=%d, to=%d", fd, cov->fd);
    50  	close(fd);
    51  
    52  	// Note: In the other KCOV implementations we pass the shared memory size
    53  	// to the initial ioctl, before mmaping. KSANCOV reversed this logic.
    54  	// Here we instead pass the maximum number of traced PCs to the initial
    55  	// KSANCOV_IOC_TRACE ioctl. We then pass a size_t pointer to the second
    56  	// KSANCOV_IOC_MAP ioctl, hence the kernel is instead telling us the final
    57  	// size. We have a sanity check in executor.cc checking that cov.size isn't
    58  	// larger or equal to kCoverSize. To make sure that assumption holds, we're
    59  	// calculating the max_entries accordingly.
    60  	size_t max_entries = floor(
    61  	    (kCoverSize - sizeof(struct ksancov_trace)) / sizeof(uint32_t));
    62  
    63  	// Note: XNUs KSANCOV API forces us to choose the mode after opening the
    64  	// device and before mmaping the coverage buffer. As the function we are
    65  	// in, cover_open(), expects us to mmap here, we are forced to commit to a
    66  	// mode here as well. For other OSes we commit to a mode in cover_enable(),
    67  	// based on collect_comps. This is not really a problem though, as TRACE_PC
    68  	// is the only relevant mode for us for now. XNU doesn't support TRACE_CMP
    69  	// and we don't care about the counters/nedges modes in XNU.
    70  	if (ksancov_mode_trace(cov->fd, max_entries))
    71  		fail("ioctl init trace write failed");
    72  }
    73  
    74  static void cover_mmap(cover_t* cov)
    75  {
    76  	if (cov->data != NULL)
    77  		fail("cover_mmap invoked on an already mmapped cover_t object");
    78  	uintptr_t mmap_ptr = 0;
    79  	if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
    80  		fail("cover mmap failed");
    81  
    82  	// Sanity check to make sure our assumptions in the max_entries calculation
    83  	// hold up.
    84  	if (cov->mmap_alloc_size > kCoverSize)
    85  		fail("mmap allocation size larger than anticipated");
    86  
    87  	cov->data = (char*)mmap_ptr;
    88  	cov->data_end = cov->data + cov->mmap_alloc_size;
    89  }
    90  
// Intentionally a no-op on darwin: the KSANCOV mapping is not
// write-protected between executions here (other OSes use this hook
// to mprotect the coverage buffer).
static void cover_protect(cover_t* cov)
{
}
    94  
// Intentionally a no-op on darwin: counterpart of cover_protect(),
// which is also a no-op on this OS.
static void cover_unprotect(cover_t* cov)
{
}
    98  
// Attaches the current thread to the already-configured KSANCOV device so
// its PCs are recorded. TRACE_CMP and extra (background) coverage are not
// supported on darwin and are rejected outright.
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
	if (collect_comps)
		fail("TRACE_CMP not implemented on darwin");
	if (extra)
		fail("Extra coverage collection not implemented on darwin");
	// Note: we are already committed to TRACE_PC here, hence we don't make use
	// of collect_comps. For more details see the comment in cover_open().
	if (ksancov_thread_self(cov->fd))
		exitf("cover enable write trace failed");
}
   110  
   111  static void cover_reset(cover_t* cov)
   112  {
   113  	ksancov_reset((struct ksancov_header*)cov->data);
   114  	ksancov_start((struct ksancov_header*)cov->data);
   115  }
   116  
   117  static void cover_collect(cover_t* cov)
   118  {
   119  	struct ksancov_trace* trace = (struct ksancov_trace*)cov->data;
   120  	cov->size = ksancov_trace_head(trace);
   121  	cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
   122  	cov->pc_offset = trace->offset;
   123  }
   124  
// Always true on darwin: every PC is eligible for edge coverage;
// the pc argument is unused here.
static bool use_cover_edges(uint64 pc)
{
	return true;
}
   128  }