aboutsummaryrefslogtreecommitdiffstats
path: root/executor/common_kvm_arm64.h
diff options
context:
space:
mode:
authorAlexander Potapenko <glider@google.com>2024-07-23 14:17:26 +0200
committerAlexander Potapenko <glider@google.com>2024-07-29 15:29:47 +0000
commit2fb4dcc9c10e100beedbbc223c2a9762bc45403e (patch)
tree7cae62f7ec97510df2f58be325ce630de2206c56 /executor/common_kvm_arm64.h
parenta22b1135716d02277936c6f48acb1086b3f9a362 (diff)
executor: arm64: sys/linux: introduce syzos API
Allow guest payload to call syzos API functions. The available calls are enumerated by SYZOS_API_* constants, and have the form of: struct api_call { uint64 call; uint64 struct_size; /* arbitrary call-related data here */ }; Complex instruction sequences are too easy to break, so most of the time the fuzzer won't be able to efficiently mutate them. We replace kvm_text_arm64 with a sequence of `struct api_call`, making it possible to intermix assembly instructions (SYZOS_API_CODE) with higher-level constructs. Right now the supported calls are: - SYZOS_API_UEXIT - abort from KVM_RUN (1 argument: exit code, uint64) - SYZOS_API_CODE - execute an ARM64 assembly blob (1 argument: inline array of int32's)
Diffstat (limited to 'executor/common_kvm_arm64.h')
-rw-r--r--executor/common_kvm_arm64.h18
1 file changed, 9 insertions, 9 deletions
diff --git a/executor/common_kvm_arm64.h b/executor/common_kvm_arm64.h
index ba02e244b..e6bb2b665 100644
--- a/executor/common_kvm_arm64.h
+++ b/executor/common_kvm_arm64.h
@@ -9,6 +9,7 @@
#include "kvm.h"
// Register encodings from https://docs.kernel.org/virt/kvm/api.html.
+#define KVM_ARM64_REGS_X0 0x6030000000100000UL
#define KVM_ARM64_REGS_PC 0x6030000000100040UL
#define KVM_ARM64_REGS_SP_EL1 0x6030000000100044UL
@@ -60,13 +61,10 @@ static struct addr_size alloc_guest_mem(struct addr_size* free, size_t size)
return ret;
}
-static void fill_with_ret(void* addr, int size)
-{
- uint32* insn = (uint32*)addr;
-
- for (int i = 0; i < size / 4; i++)
- insn[i] = 0xd65f03c0; // RET
-}
+struct api_fn {
+ int index;
+ void* fn;
+};
// syz_kvm_setup_cpu(fd fd_kvmvm, cpufd fd_kvmcpu, usermem vma[24], text ptr[in, array[kvm_text, 1]], ntext len[text], flags flags[kvm_setup_flags], opts ptr[in, array[kvm_setup_opt, 0:2]], nopt len[opts])
static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3, volatile long a4, volatile long a5, volatile long a6, volatile long a7)
@@ -108,6 +106,7 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
// Guest physical memory layout:
// 0x00000000 - unused pages
+	// 0xdddd0000 - unmapped region to trigger page faults for uexits etc. (1 page)
// 0xeeee0000 - user code (1 page)
// 0xeeee8000 - executor guest code (4 pages)
// 0xffff1000 - EL1 stack (1 page)
@@ -119,12 +118,11 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
vm_set_user_memory_region(vmfd, slot++, KVM_MEM_READONLY, ARM64_ADDR_EXECUTOR_CODE, host_text.size, (uintptr_t)host_text.addr);
struct addr_size next = alloc_guest_mem(&allocator, page_size);
- // Fill the guest code page with RET instructions to be on the safe side.
- fill_with_ret(next.addr, next.size);
if (text_size > next.size)
text_size = next.size;
memcpy(next.addr, text, text_size);
vm_set_user_memory_region(vmfd, slot++, KVM_MEM_READONLY, ARM64_ADDR_USER_CODE, next.size, (uintptr_t)next.addr);
+
next = alloc_guest_mem(&allocator, page_size);
vm_set_user_memory_region(vmfd, slot++, 0, ARM64_ADDR_EL1_STACK_BOTTOM, next.size, (uintptr_t)next.addr);
@@ -143,6 +141,8 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
// PC points to the relative offset of guest_main() within the guest code.
vcpu_set_reg(cpufd, KVM_ARM64_REGS_PC, ARM64_ADDR_EXECUTOR_CODE + ((uint64)guest_main - (uint64)&__start_guest));
vcpu_set_reg(cpufd, KVM_ARM64_REGS_SP_EL1, ARM64_ADDR_EL1_STACK_BOTTOM + page_size - 128);
+ // Pass parameters to guest_main().
+ vcpu_set_reg(cpufd, KVM_ARM64_REGS_X0, text_size);
return 0;
}