github.com/google/syzkaller@v0.0.0-20251211124644-a066d2bc4b02/executor/common_kvm_arm64.h (about)

     1  #ifndef EXECUTOR_COMMON_KVM_ARM64_H
     2  #define EXECUTOR_COMMON_KVM_ARM64_H
     3  
     4  // Copyright 2017 syzkaller project authors. All rights reserved.
     5  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     6  
     7  // This file is shared between executor and csource package.
     8  
     9  // Implementation of syz_kvm_setup_cpu pseudo-syscall.
    10  #include <sys/mman.h>
    11  
    12  #include "common_kvm.h"
    13  #include "kvm.h"
    14  
    15  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu || __NR_syz_kvm_setup_syzos_vm
    16  #include "common_kvm_arm64_syzos.h"
    17  #endif
    18  
    19  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu
    20  // Register encodings from https://docs.kernel.org/virt/kvm/api.html.
    21  #define KVM_ARM64_REGS_X0 0x6030000000100000UL
    22  #define KVM_ARM64_REGS_X1 0x6030000000100002UL
    23  #define KVM_ARM64_REGS_PC 0x6030000000100040UL
    24  #define KVM_ARM64_REGS_SP_EL1 0x6030000000100044UL
    25  #define KVM_ARM64_REGS_TPIDR_EL1 0x603000000013c684
    26  
// One guest code fragment supplied by the fuzzer to syz_kvm_setup_cpu
// (mirrors the kvm_text syzlang struct).
struct kvm_text {
	uintptr_t typ; // code flavor selector; ignored by the arm64 implementation
	const void* text; // host pointer to the code bytes
	uintptr_t size; // size of the code fragment in bytes
};
    32  
// A (type, value) vCPU setup option pair; typ==1 sets the
// kvm_vcpu_init.features[0] bitmask (see setup_cpu_with_opts).
struct kvm_opt {
	uint64 typ;
	uint64 val;
};
    37  #endif
    38  
    39  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_setup_syzos_vm
    40  struct addr_size {
    41  	void* addr;
    42  	size_t size;
    43  };
    44  
    45  static struct addr_size alloc_guest_mem(struct addr_size* free, size_t size)
    46  {
    47  	struct addr_size ret = {.addr = NULL, .size = 0};
    48  
    49  	if (free->size < size)
    50  		return ret;
    51  	ret.addr = free->addr;
    52  	ret.size = size;
    53  	free->addr = (void*)((char*)free->addr + size);
    54  	free->size -= size;
    55  	return ret;
    56  }
    57  
    58  // Call KVM_SET_USER_MEMORY_REGION for the given pages.
    59  static void vm_set_user_memory_region(int vmfd, uint32 slot, uint32 flags, uint64 guest_phys_addr, uint64 memory_size, uint64 userspace_addr)
    60  {
    61  	struct kvm_userspace_memory_region memreg;
    62  	memreg.slot = slot;
    63  	memreg.flags = flags;
    64  	memreg.guest_phys_addr = guest_phys_addr;
    65  	memreg.memory_size = memory_size;
    66  	memreg.userspace_addr = userspace_addr;
    67  	ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
    68  }
    69  
    70  #define ADRP_OPCODE 0x90000000
    71  #define ADRP_OPCODE_MASK 0x9f000000
    72  
    73  // Code loading SyzOS into guest memory does not handle data relocations (see
    74  // https://github.com/google/syzkaller/issues/5565), so SyzOS will crash soon after encountering an
    75  // ADRP instruction. Detect these instructions to catch regressions early.
    76  // The most common reason for using data relocaions is accessing global variables and constants.
    77  // Sometimes the compiler may choose to emit a read-only constant to zero-initialize a structure
    78  // or to generate a jump table for a switch statement.
    79  static void validate_guest_code(void* mem, size_t size)
    80  {
    81  	uint32* insns = (uint32*)mem;
    82  	for (size_t i = 0; i < size / 4; i++) {
    83  		if ((insns[i] & ADRP_OPCODE_MASK) == ADRP_OPCODE)
    84  			fail("ADRP instruction detected in SyzOS, exiting");
    85  	}
    86  }
    87  
    88  static void install_syzos_code(void* host_mem, size_t mem_size)
    89  {
    90  	size_t size = (char*)&__stop_guest - (char*)&__start_guest;
    91  	if (size > mem_size)
    92  		fail("SyzOS size exceeds guest memory");
    93  	memcpy(host_mem, &__start_guest, size);
    94  	validate_guest_code(host_mem, size);
    95  }
    96  
// Populate the guest physical address space of a SyzOS VM: carve chunks
// sequentially out of the single host_mem buffer (KVM_GUEST_MEM_SIZE bytes)
// and register each chunk as a KVM memory slot.
// If text_slot is non-NULL, *text_slot receives the host address backing the
// per-vCPU user code region (ARM64_ADDR_USER_CODE).
// NOTE(review): the carve order below is load-bearing only for which host
// pages back which region; slot numbers themselves are arbitrary.
static void setup_vm(int vmfd, void* host_mem, void** text_slot)
{
	// Guest physical memory layout (must be in sync with executor/kvm.h):
	// 0x00000000 - unused pages
	// 0x08000000 - GICv3 distributor region (MMIO, no memory allocated)
	// 0x080a0000 - GICv3 redistributor region (MMIO, no memory allocated)
	// 0xdddd0000 - unmapped region to trigger a page faults for uexits etc. (1 page)
	// 0xdddd1000 - writable region with KVM_MEM_LOG_DIRTY_PAGES to fuzz dirty ring (2 pages)
	// 0xeeee0000 - user code (4 pages)
	// 0xeeee8000 - executor guest code (4 pages)
	// 0xeeef0000 - scratch memory for code generated at runtime (1 page)
	// 0xffff1000 - EL1 stack (1 page)
	struct addr_size allocator = {.addr = host_mem, .size = KVM_GUEST_MEM_SIZE};
	int slot = 0; // Slot numbers do not matter, they just have to be different.

	// Read-only region holding the SyzOS executor image.
	struct addr_size host_text = alloc_guest_mem(&allocator, 4 * KVM_PAGE_SIZE);
	install_syzos_code(host_text.addr, host_text.size);
	vm_set_user_memory_region(vmfd, slot++, KVM_MEM_READONLY, SYZOS_ADDR_EXECUTOR_CODE, host_text.size, (uintptr_t)host_text.addr);

	// Dirty-logged pages used to fuzz the dirty ring.
	struct addr_size next = alloc_guest_mem(&allocator, 2 * KVM_PAGE_SIZE);
	vm_set_user_memory_region(vmfd, slot++, KVM_MEM_LOG_DIRTY_PAGES, ARM64_ADDR_DIRTY_PAGES, next.size, (uintptr_t)next.addr);

	// One read-only page of user-supplied guest code per possible vCPU.
	next = alloc_guest_mem(&allocator, KVM_MAX_VCPU * KVM_PAGE_SIZE);
	vm_set_user_memory_region(vmfd, slot++, KVM_MEM_READONLY, ARM64_ADDR_USER_CODE, next.size, (uintptr_t)next.addr);
	if (text_slot)
		*text_slot = next.addr;

	// EL1 stack page.
	next = alloc_guest_mem(&allocator, KVM_PAGE_SIZE);
	vm_set_user_memory_region(vmfd, slot++, 0, ARM64_ADDR_EL1_STACK_BOTTOM, next.size, (uintptr_t)next.addr);

	// Writable scratch page for code generated at runtime.
	next = alloc_guest_mem(&allocator, KVM_PAGE_SIZE);
	vm_set_user_memory_region(vmfd, slot++, 0, ARM64_ADDR_SCRATCH_CODE, next.size, (uintptr_t)next.addr);

	// Allocate memory for the ITS tables: 64K for the device table, collection table, command queue, property table,
	// plus 64K * 4 CPUs for the pending tables, and 64K * 16 devices for the ITT tables.
	int its_size = SZ_64K * (4 + 4 + 16);
	next = alloc_guest_mem(&allocator, its_size);
	vm_set_user_memory_region(vmfd, slot++, 0, ARM64_ADDR_ITS_TABLES, next.size, (uintptr_t)next.addr);

	// Map the remaining pages at address 0.
	next = alloc_guest_mem(&allocator, allocator.size);
	vm_set_user_memory_region(vmfd, slot++, 0, 0, next.size, (uintptr_t)next.addr);
}
   140  #endif
   141  
   142  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu
   143  // Set the value of the specified register.
   144  static void vcpu_set_reg(int vcpu_fd, uint64 id, uint64 val)
   145  {
   146  	struct kvm_one_reg reg = {.id = id, .addr = (uint64)&val};
   147  	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
   148  }
   149  
   150  // Set up CPU registers.
   151  static void reset_cpu_regs(int cpufd, int cpu_id, size_t text_size)
   152  {
   153  	// PC points to the relative offset of guest_main() within the guest code.
   154  	vcpu_set_reg(cpufd, KVM_ARM64_REGS_PC, executor_fn_guest_addr(guest_main));
   155  	vcpu_set_reg(cpufd, KVM_ARM64_REGS_SP_EL1, ARM64_ADDR_EL1_STACK_BOTTOM + KVM_PAGE_SIZE - 128);
   156  	// Store the CPU ID in TPIDR_EL1.
   157  	vcpu_set_reg(cpufd, KVM_ARM64_REGS_TPIDR_EL1, cpu_id);
   158  	// Pass parameters to guest_main().
   159  	vcpu_set_reg(cpufd, KVM_ARM64_REGS_X0, text_size);
   160  	vcpu_set_reg(cpufd, KVM_ARM64_REGS_X1, cpu_id);
   161  }
   162  
   163  static void install_user_code(int cpufd, void* user_text_slot, int cpu_id, const void* text, size_t text_size)
   164  {
   165  	if ((cpu_id < 0) || (cpu_id >= KVM_MAX_VCPU))
   166  		return;
   167  	if (!user_text_slot)
   168  		return;
   169  	if (text_size > KVM_PAGE_SIZE)
   170  		text_size = KVM_PAGE_SIZE;
   171  	void* target = (void*)((uint64)user_text_slot + (KVM_PAGE_SIZE * cpu_id));
   172  	memcpy(target, text, text_size);
   173  	reset_cpu_regs(cpufd, cpu_id, text_size);
   174  }
   175  
   176  static void setup_cpu_with_opts(int vmfd, int cpufd, const struct kvm_opt* opt, int opt_count)
   177  {
   178  	uint32 features = 0;
   179  	if (opt_count > 1)
   180  		opt_count = 1;
   181  	for (int i = 0; i < opt_count; i++) {
   182  		uint64 typ = opt[i].typ;
   183  		uint64 val = opt[i].val;
   184  		switch (typ) {
   185  		case 1:
   186  			features = val;
   187  			break;
   188  		}
   189  	}
   190  
   191  	struct kvm_vcpu_init init;
   192  	// Queries KVM for preferred CPU target type.
   193  	ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &init);
   194  	init.features[0] = features;
   195  	// Use the modified struct kvm_vcpu_init to initialize the virtual CPU.
   196  	ioctl(cpufd, KVM_ARM_VCPU_INIT, &init);
   197  }
   198  
   199  #endif
   200  
   201  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu
// syz_kvm_setup_cpu(fd fd_kvmvm, cpufd fd_kvmcpu, usermem vma[24], text ptr[in, array[kvm_text, 1]], ntext len[text], flags flags[kvm_setup_flags], opts ptr[in, array[kvm_setup_opt, 0:2]], nopt len[opts])
// Pseudo-syscall: populate guest memory of an existing VM, initialize the
// given vCPU with the supplied options, and install the first text fragment
// on vCPU 0. Always returns 0.
static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3, volatile long a4, volatile long a5, volatile long a6, volatile long a7)
{
	const int vmfd = a0;
	const int cpufd = a1;
	void* const host_mem = (void*)a2;
	const struct kvm_text* const text_array_ptr = (struct kvm_text*)a3;
	const uintptr_t text_count = a4;
	const uintptr_t flags = a5;
	const struct kvm_opt* const opt_array_ptr = (struct kvm_opt*)a6;
	uintptr_t opt_count = a7;

	// The (void) casts silence unused-variable warnings; flags is accepted
	// for ABI compatibility but not used on arm64.
	(void)flags;
	(void)opt_count;

	(void)text_count; // fuzzer can spoof count and we need just 1 text, so ignore text_count
	int text_type = text_array_ptr[0].typ;
	const void* text = text_array_ptr[0].text;
	size_t text_size = text_array_ptr[0].size;
	(void)text_type; // the code-type tag is not used on arm64

	void* user_text_slot = NULL;
	setup_vm(vmfd, host_mem, &user_text_slot);
	setup_cpu_with_opts(vmfd, cpufd, opt_array_ptr, opt_count);

	// Assume CPU is 0.
	install_user_code(cpufd, user_text_slot, 0, text, text_size);
	return 0;
}
   231  #endif
   232  
   233  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm || __NR_syz_kvm_add_vcpu
// Host-side bookkeeping for a SyzOS VM, stored in the first page of the
// guest memory mapping and handed back to the fuzzer as an opaque handle.
struct kvm_syz_vm {
	int vmfd; // VM file descriptor
	int next_cpu_id; // id that syz_kvm_add_vcpu will assign to the next vCPU
	void* user_text; // host address of the per-vCPU user code region
};
   239  #endif
   240  
   241  #if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm
   242  
   243  static long syz_kvm_setup_syzos_vm(volatile long a0, volatile long a1)
   244  {
   245  	const int vmfd = a0;
   246  	void* host_mem = (void*)a1;
   247  
   248  	void* user_text_slot = NULL;
   249  	struct kvm_syz_vm* ret = (struct kvm_syz_vm*)host_mem;
   250  	host_mem = (void*)((uint64)host_mem + KVM_PAGE_SIZE);
   251  	setup_vm(vmfd, host_mem, &user_text_slot);
   252  	ret->vmfd = vmfd;
   253  	ret->next_cpu_id = 0;
   254  	ret->user_text = user_text_slot;
   255  	return (long)ret;
   256  }
   257  #endif
   258  
   259  #if SYZ_EXECUTOR || __NR_syz_kvm_add_vcpu
   260  static long syz_kvm_add_vcpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3)
   261  {
   262  	struct kvm_syz_vm* vm = (struct kvm_syz_vm*)a0;
   263  	struct kvm_text* utext = (struct kvm_text*)a1;
   264  	const void* text = utext->text;
   265  	size_t text_size = utext->size;
   266  	const struct kvm_opt* const opt_array_ptr = (struct kvm_opt*)a2;
   267  	uintptr_t opt_count = a3;
   268  
   269  	if (!vm) {
   270  		errno = EINVAL;
   271  		return -1;
   272  	}
   273  	if (vm->next_cpu_id == KVM_MAX_VCPU) {
   274  		errno = ENOMEM;
   275  		return -1;
   276  	}
   277  	int cpu_id = vm->next_cpu_id;
   278  	int cpufd = ioctl(vm->vmfd, KVM_CREATE_VCPU, cpu_id);
   279  	if (cpufd == -1)
   280  		return -1;
   281  	// Only increment next_cpu_id if CPU creation succeeded.
   282  	vm->next_cpu_id++;
   283  	setup_cpu_with_opts(vm->vmfd, cpufd, opt_array_ptr, opt_count);
   284  	install_user_code(cpufd, vm->user_text, cpu_id, text, text_size);
   285  	return cpufd;
   286  }
   287  #endif
   288  
   289  #if SYZ_EXECUTOR || __NR_syz_kvm_vgic_v3_setup
   290  static int kvm_set_device_attr(int dev_fd, uint32 group, uint64 attr, void* val)
   291  {
   292  	struct kvm_device_attr kvmattr = {
   293  	    .flags = 0,
   294  	    .group = group,
   295  	    .attr = attr,
   296  	    .addr = (uintptr_t)val,
   297  	};
   298  
   299  	return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
   300  }
   301  
   302  static int kvm_create_device(int vm_fd, int type)
   303  {
   304  	struct kvm_create_device create_dev = {
   305  	    .type = (uint32)type,
   306  	    .fd = (uint32)-1,
   307  	    .flags = 0,
   308  	};
   309  
   310  	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &create_dev) != -1)
   311  		return create_dev.fd;
   312  	else
   313  		return -1;
   314  }
   315  
   316  #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
   317  	(((uint64)(count) << 52) |                         \
   318  	 ((uint64)((base) >> 16) << 16) |                  \
   319  	 ((uint64)(flags) << 12) |                         \
   320  	 index)
   321  
   322  // Set up the VGICv3 interrupt controller.
   323  // syz_kvm_vgic_v3_setup(fd fd_kvmvm, ncpus flags[kvm_num_cpus], nirqs flags[kvm_num_irqs])
   324  static long syz_kvm_vgic_v3_setup(volatile long a0, volatile long a1, volatile long a2)
   325  {
   326  	const int vm_fd = a0;
   327  	const int nr_vcpus = a1;
   328  	const int want_nr_irq = a2;
   329  
   330  	int vgic_fd = kvm_create_device(vm_fd, KVM_DEV_TYPE_ARM_VGIC_V3);
   331  	if (vgic_fd == -1)
   332  		return -1;
   333  
   334  	uint32 nr_irq = want_nr_irq;
   335  	int ret = kvm_set_device_attr(vgic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irq);
   336  	if (ret == -1) {
   337  		close(vgic_fd);
   338  		return -1;
   339  	}
   340  
   341  	uint64 gicd_base_gpa = ARM64_ADDR_GICD_BASE;
   342  	ret = kvm_set_device_attr(vgic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa);
   343  	if (ret == -1) {
   344  		close(vgic_fd);
   345  		return -1;
   346  	}
   347  	uint64 redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, ARM64_ADDR_GICR_BASE, 0, 0);
   348  	ret = kvm_set_device_attr(vgic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr);
   349  	if (ret == -1) {
   350  		close(vgic_fd);
   351  		return -1;
   352  	}
   353  
   354  	ret = kvm_set_device_attr(vgic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
   355  	if (ret == -1) {
   356  		close(vgic_fd);
   357  		return -1;
   358  	}
   359  
   360  	return vgic_fd;
   361  }
   362  #endif
   363  
   364  #if SYZ_EXECUTOR || __NR_syz_kvm_assert_syzos_uexit
   365  static long syz_kvm_assert_syzos_uexit(volatile long a0, volatile long a1)
   366  {
   367  	struct kvm_run* run = (struct kvm_run*)a0;
   368  	uint64 expect = a1;
   369  
   370  	if (!run || (run->exit_reason != KVM_EXIT_MMIO) || (run->mmio.phys_addr != ARM64_ADDR_UEXIT)) {
   371  		errno = EINVAL;
   372  		return -1;
   373  	}
   374  
   375  	if ((((uint64*)(run->mmio.data))[0]) != expect) {
   376  		errno = EDOM;
   377  		return -1;
   378  	}
   379  	return 0;
   380  }
   381  #endif
   382  
   383  #if SYZ_EXECUTOR || __NR_syz_kvm_assert_reg
   384  static long syz_kvm_assert_reg(volatile long a0, volatile long a1, volatile long a2)
   385  {
   386  	int vcpu_fd = (int)a0;
   387  	uint64 id = (uint64)a1;
   388  	uint64 expect = a2, val = 0;
   389  
   390  	struct kvm_one_reg reg = {.id = id, .addr = (uint64)&val};
   391  	int ret = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
   392  	if (ret)
   393  		return ret;
   394  	if (val != expect) {
   395  		errno = EDOM;
   396  		return -1;
   397  	}
   398  	return 0;
   399  }
   400  #endif
   401  
   402  #endif // EXECUTOR_COMMON_KVM_ARM64_H