aboutsummaryrefslogtreecommitdiffstats
path: root/executor
diff options
context:
space:
mode:
author6eanut <jiakaiPeanut@gmail.com>2026-01-22 04:23:38 +0000
committerAlexander Potapenko <glider@google.com>2026-02-25 07:36:12 +0000
commitc162cde9eaf50b92de5277df3e48c93349bf45a3 (patch)
tree4560241f339f0b21ebefa31b0db831886d55df73 /executor
parent787dfb7c5a058a72a4936baaab3c96c076d03079 (diff)
executor, sys/linux, pkg: enable syzos for riscv64
This patch enables syzos for riscv64 and implements the corresponding pseudo syscalls. Pseudo syscalls: - syz_kvm_setup_syzos_vm - syz_kvm_add_vcpu - syz_kvm_assert_syzos_uexit Syzos guest support: - guest_uexit - guest_execute_code - guest_handle_csrr and guest_handle_csrw Test seeds: - riscv64-syz_kvm_setup_syzos_vm - riscv64-syz_kvm_setup_syzos_vm-csrr - riscv64-syz_kvm_setup_syzos_vm-csrw
Diffstat (limited to 'executor')
-rw-r--r--executor/common_kvm_riscv64.h298
-rw-r--r--executor/common_kvm_riscv64_syzos.h208
-rw-r--r--executor/kvm.h22
3 files changed, 514 insertions, 14 deletions
diff --git a/executor/common_kvm_riscv64.h b/executor/common_kvm_riscv64.h
index 500240638..2c0ed8062 100644
--- a/executor/common_kvm_riscv64.h
+++ b/executor/common_kvm_riscv64.h
@@ -12,13 +12,14 @@
#include <string.h>
#include <sys/ioctl.h>
-#if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu
-struct kvm_text {
- uintptr_t type;
- const void* text;
- uintptr_t size;
-};
+#include "common_kvm.h"
+#include "kvm.h"
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_setup_syzos_vm || __NR_syz_kvm_add_vcpu
+#include "common_kvm_riscv64_syzos.h"
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu
// Construct RISC-V register id for KVM.
#define RISCV_CORE_REG(idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE | (idx))
#define RISCV_CSR_REG(idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR | (idx))
@@ -83,9 +84,6 @@ enum riscv_core_index {
// Indicate the Supervisor Interrupt Enable state.
#define SSTATUS_SIE (1UL << 1)
-// Define the starting physical address for the guest code.
-#define CODE_START 0x80000000ULL
-
// Set a single register value for the specified CPU file descriptor.
static inline int kvm_set_reg(int cpufd, unsigned long id, unsigned long value)
{
@@ -96,6 +94,21 @@ static inline int kvm_set_reg(int cpufd, unsigned long id, unsigned long value)
return ioctl(cpufd, KVM_SET_ONE_REG, &reg);
}
+struct kvm_text {
+ uintptr_t type;
+ const void* text;
+ uintptr_t size;
+};
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_setup_syzos_vm
+#ifndef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_cpu
+
// syz_kvm_setup_cpu$riscv64(fd fd_kvmvm, cpufd fd_kvmcpu, usermem vma[24], text ptr[in, array[kvm_text_riscv64, 1]], ntext len[text], flags const[0], opts ptr[in, array[kvm_setup_opt_riscv64, 1]], nopt len[opts])
static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3, volatile long a4, volatile long a5, volatile long a6, volatile long a7)
{
@@ -113,7 +126,7 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
struct kvm_userspace_memory_region mem = {
.slot = (unsigned int)i,
.flags = 0,
- .guest_phys_addr = CODE_START + i * page_size,
+ .guest_phys_addr = RISCV64_ADDR_USER_CODE + i * page_size,
.memory_size = page_size,
.userspace_addr =
(uintptr_t)(host_mem + i * page_size),
@@ -131,13 +144,14 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
if (size > guest_mem_size)
size = guest_mem_size;
memcpy(host_mem, text, size);
+ memcpy(host_mem + page_size, (void*)guest_unexp_trap, MIN(KVM_PAGE_SIZE, (size_t)((char*)__stop_guest - (char*)guest_unexp_trap)));
// Initialize VCPU registers.
// Set PC (program counter) to start of code.
- if (kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_PC), CODE_START))
+ if (kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_PC), RISCV64_ADDR_USER_CODE))
return -1;
// Set SP (stack pointer) at end of memory, reserving space for stack.
- unsigned long stack_top = CODE_START + guest_mem_size - page_size;
+ unsigned long stack_top = RISCV64_ADDR_USER_CODE + guest_mem_size - page_size;
if (kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_SP), stack_top))
return -1;
// Set privilege mode to S-mode.
@@ -148,9 +162,17 @@ static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1, volat
if (kvm_set_reg(cpufd, RISCV_CSR_REG(CSR_SSTATUS), sstatus))
return -1;
// Set STVEC.
- unsigned long stvec = CODE_START + page_size;
+ unsigned long stvec = RISCV64_ADDR_USER_CODE + page_size;
if (kvm_set_reg(cpufd, RISCV_CSR_REG(CSR_STVEC), stvec))
return -1;
+ // Set GP.
+ unsigned long current_gp = 0;
+ asm volatile("add %0, gp, zero"
+ : "=r"(current_gp)
+ :
+ : "memory");
+ if (kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_GP), current_gp))
+ return -1;
return 0;
}
@@ -175,4 +197,252 @@ static long syz_kvm_assert_reg(volatile long a0, volatile long a1, volatile long
}
#endif
-#endif // EXECUTOR_COMMON_KVM_RISCV64_H \ No newline at end of file
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm || __NR_syz_kvm_add_vcpu
+struct kvm_syz_vm {
+ int vmfd;
+ int next_cpu_id;
+ void* host_mem;
+ size_t total_pages;
+ void* user_text;
+};
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm
+struct addr_size {
+ void* addr;
+ size_t size;
+};
+
+static struct addr_size alloc_guest_mem(struct addr_size* free, size_t size)
+{
+ struct addr_size ret = {.addr = NULL, .size = 0};
+
+ if (free->size < size)
+ return ret;
+ ret.addr = free->addr;
+ ret.size = size;
+ free->addr = (void*)((char*)free->addr + size);
+ free->size -= size;
+ return ret;
+}
+
+// Call KVM_SET_USER_MEMORY_REGION for the given pages.
+static void vm_set_user_memory_region(int vmfd, uint32 slot, uint32 flags, uint64 guest_phys_addr, uint64 memory_size, uint64 userspace_addr)
+{
+ struct kvm_userspace_memory_region memreg;
+ memreg.slot = slot;
+ memreg.flags = flags;
+ memreg.guest_phys_addr = guest_phys_addr;
+ memreg.memory_size = memory_size;
+ memreg.userspace_addr = userspace_addr;
+ ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
+}
+
+#define AUIPC_OPCODE 0x17
+#define AUIPC_OPCODE_MASK 0x7f
+
+// Code loading SYZOS into guest memory does not handle data relocations (see
+// https://github.com/google/syzkaller/issues/5565), so SYZOS will crash soon after encountering an
+// AUIPC instruction. Detect these instructions to catch regressions early.
+// The most common reason for using data relocaions is accessing global variables and constants.
+// Sometimes the compiler may choose to emit a read-only constant to zero-initialize a structure
+// or to generate a jump table for a switch statement.
+static void validate_guest_code(void* mem, size_t size)
+{
+ uint32* insns = (uint32*)mem;
+ for (size_t i = 0; i < size / 4; i++) {
+ if ((insns[i] & AUIPC_OPCODE_MASK) == AUIPC_OPCODE)
+ fail("AUIPC instruction detected in SYZOS, exiting");
+ }
+}
+
+static void install_syzos_code(void* host_mem, size_t mem_size)
+{
+ size_t size = (char*)&__stop_guest - (char*)&__start_guest;
+ if (size > mem_size)
+ fail("SYZOS size exceeds guest memory");
+ memcpy(host_mem, &__start_guest, size);
+ validate_guest_code(host_mem, size);
+}
+
+// Flags for mem_region.
+#define MEM_REGION_FLAG_USER_CODE (1 << 0)
+#define MEM_REGION_FLAG_DIRTY_LOG (1 << 1)
+#define MEM_REGION_FLAG_READONLY (1 << 2)
+#define MEM_REGION_FLAG_EXECUTOR_CODE (1 << 3)
+#define MEM_REGION_FLAG_EXCEPTION_VEC (1 << 4)
+#define MEM_REGION_FLAG_NO_HOST_MEM (1 << 6)
+
+struct mem_region {
+ uint64 gpa;
+ int pages;
+ uint32 flags;
+};
+
+// SYZOS guest physical memory layout (must be in sync with executor/kvm.h):
+static const struct mem_region syzos_mem_regions[] = {
+ // Exception vector table (1 page at 0x1000).
+ {RISCV64_ADDR_EXCEPTION_VECTOR, 1, MEM_REGION_FLAG_READONLY | MEM_REGION_FLAG_EXCEPTION_VEC},
+ // CLINT at 0x02000000 (MMIO, no memory).
+ {RISCV64_ADDR_CLINT, 1, MEM_REGION_FLAG_NO_HOST_MEM},
+ // PLIC at 0x0c000000 (MMIO, no memory).
+ {RISCV64_ADDR_PLIC, 1, MEM_REGION_FLAG_NO_HOST_MEM},
+ // Unmapped region to trigger page faults (1 page at 0x40000000).
+ {RISCV64_ADDR_EXIT, 1, MEM_REGION_FLAG_NO_HOST_MEM},
+ // Writable region with KVM_MEM_LOG_DIRTY_PAGES (2 pages).
+ {RISCV64_ADDR_DIRTY_PAGES, 2, MEM_REGION_FLAG_DIRTY_LOG},
+ // User code (KVM_MAX_VCPU pages, starting at 0x80000000).
+ {RISCV64_ADDR_USER_CODE, KVM_MAX_VCPU, MEM_REGION_FLAG_READONLY | MEM_REGION_FLAG_USER_CODE},
+ // Executor guest code (4 pages).
+ {SYZOS_ADDR_EXECUTOR_CODE, 4, MEM_REGION_FLAG_READONLY | MEM_REGION_FLAG_EXECUTOR_CODE},
+ // Scratch memory for runtime code (1 page).
+ {RISCV64_ADDR_SCRATCH_CODE, 1, 0},
+ // Per-vCPU stacks (1 page).
+ {RISCV64_ADDR_STACK_BASE, 1, 0},
+};
+
+static void setup_vm(int vmfd, struct kvm_syz_vm* vm)
+{
+ struct addr_size allocator = {.addr = vm->host_mem, .size = vm->total_pages * KVM_PAGE_SIZE};
+ int slot = 0; // Slot numbers do not matter, they just have to be different.
+
+ for (size_t i = 0; i < sizeof(syzos_mem_regions) / sizeof(syzos_mem_regions[0]); i++) {
+ const struct mem_region* r = &syzos_mem_regions[i];
+ if (r->flags & MEM_REGION_FLAG_NO_HOST_MEM)
+ continue;
+ struct addr_size next = alloc_guest_mem(&allocator, r->pages * KVM_PAGE_SIZE);
+ uint32 flags = 0;
+ if (r->flags & MEM_REGION_FLAG_DIRTY_LOG)
+ flags |= KVM_MEM_LOG_DIRTY_PAGES;
+ if (r->flags & MEM_REGION_FLAG_READONLY)
+ flags |= KVM_MEM_READONLY;
+ if (r->flags & MEM_REGION_FLAG_USER_CODE)
+ vm->user_text = next.addr;
+ if (r->flags & MEM_REGION_FLAG_EXCEPTION_VEC)
+ memcpy(next.addr, (void*)guest_unexp_trap, MIN(KVM_PAGE_SIZE, (size_t)((char*)__stop_guest - (char*)guest_unexp_trap)));
+ if (r->flags & MEM_REGION_FLAG_EXECUTOR_CODE)
+ install_syzos_code(next.addr, next.size);
+ vm_set_user_memory_region(vmfd, slot++, flags, r->gpa, next.size, (uintptr_t)next.addr);
+ }
+
+ // Map the remaining pages at an unused address.
+ if (allocator.size > 0) {
+ struct addr_size next = alloc_guest_mem(&allocator, allocator.size);
+ vm_set_user_memory_region(vmfd, slot++, 0, 0, next.size, (uintptr_t)next.addr);
+ }
+}
+
+static long syz_kvm_setup_syzos_vm(volatile long a0, volatile long a1)
+{
+ const int vmfd = a0;
+ void* host_mem = (void*)a1;
+ struct kvm_syz_vm* ret = (struct kvm_syz_vm*)host_mem;
+ ret->host_mem = (void*)((uint64)host_mem + KVM_PAGE_SIZE);
+ ret->total_pages = KVM_GUEST_PAGES - 1;
+ setup_vm(vmfd, ret);
+ ret->vmfd = vmfd;
+ ret->next_cpu_id = 0;
+
+ return (long)ret;
+}
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_add_vcpu
+// Set up CPU registers.
+static void reset_cpu_regs(int cpufd, int cpu_id, size_t text_size)
+{
+ // PC points to the relative offset of guest_main() within the guest code.
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_PC), executor_fn_guest_addr(guest_main));
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_SP), RISCV64_ADDR_STACK_BASE + KVM_PAGE_SIZE - 128);
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_TP), cpu_id);
+ // Pass parameters to guest_main().
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_A0), text_size);
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_A1), cpu_id);
+ // Set SSTATUS and MODE.
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_MODE), 1);
+ kvm_set_reg(cpufd, RISCV_CSR_REG(CSR_SSTATUS), SSTATUS_SPP | SSTATUS_SPIE);
+ // Set GP.
+ unsigned long current_gp = 0;
+ asm volatile("add %0, gp, zero"
+ : "=r"(current_gp)
+ :
+ : "memory");
+ kvm_set_reg(cpufd, RISCV_CORE_REG(CORE_GP), current_gp);
+ // Set STVEC.
+ kvm_set_reg(cpufd, RISCV_CSR_REG(CSR_STVEC), RISCV64_ADDR_EXCEPTION_VECTOR);
+}
+
+static void install_user_code(int cpufd, void* user_text_slot, int cpu_id, const void* text, size_t text_size)
+{
+ if ((cpu_id < 0) || (cpu_id >= KVM_MAX_VCPU))
+ return;
+ if (!user_text_slot)
+ return;
+ if (text_size > KVM_PAGE_SIZE)
+ text_size = KVM_PAGE_SIZE;
+ void* target = (void*)((uint64)user_text_slot + (KVM_PAGE_SIZE * cpu_id));
+ memcpy(target, text, text_size);
+ reset_cpu_regs(cpufd, cpu_id, text_size);
+}
+
+static long syz_kvm_add_vcpu(volatile long a0, volatile long a1, volatile long a2, volatile long a3)
+{
+ struct kvm_syz_vm* vm = (struct kvm_syz_vm*)a0;
+ struct kvm_text* utext = (struct kvm_text*)a1;
+ const void* text = utext->text;
+ size_t text_size = utext->size;
+
+ if (!vm) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (vm->next_cpu_id == KVM_MAX_VCPU) {
+ errno = ENOMEM;
+ return -1;
+ }
+ int cpu_id = vm->next_cpu_id;
+ int cpufd = ioctl(vm->vmfd, KVM_CREATE_VCPU, cpu_id);
+ if (cpufd == -1)
+ return -1;
+ // Only increment next_cpu_id if CPU creation succeeded.
+ vm->next_cpu_id++;
+ install_user_code(cpufd, vm->user_text, cpu_id, text, text_size);
+ return cpufd;
+}
+#endif
+
+#if SYZ_EXECUTOR || __NR_syz_kvm_assert_syzos_uexit
+static long syz_kvm_assert_syzos_uexit(volatile long a0, volatile long a1,
+ volatile long a2)
+{
+#if !SYZ_EXECUTOR
+ int cpufd = (int)a0;
+#endif
+ struct kvm_run* run = (struct kvm_run*)a1;
+ uint64 expect = a2;
+
+ if (!run || (run->exit_reason != KVM_EXIT_MMIO) ||
+ (run->mmio.phys_addr != RISCV64_ADDR_UEXIT)) {
+#if !SYZ_EXECUTOR
+ fprintf(stderr, "[SYZOS-DEBUG] Assertion Triggered on VCPU %d\n", cpufd);
+#endif
+ errno = EINVAL;
+ return -1;
+ }
+
+ uint64 actual_code = ((uint64*)(run->mmio.data))[0];
+ if (actual_code != expect) {
+#if !SYZ_EXECUTOR
+ fprintf(stderr, "[SYZOS-DEBUG] Exit Code Mismatch on VCPU %d\n", cpufd);
+ fprintf(stderr, " Expected: 0x%lx\n", (unsigned long)expect);
+ fprintf(stderr, " Actual: 0x%lx\n",
+ (unsigned long)actual_code);
+#endif
+ errno = EDOM;
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+#endif // EXECUTOR_COMMON_KVM_RISCV64_H
diff --git a/executor/common_kvm_riscv64_syzos.h b/executor/common_kvm_riscv64_syzos.h
new file mode 100644
index 000000000..421ca5929
--- /dev/null
+++ b/executor/common_kvm_riscv64_syzos.h
@@ -0,0 +1,208 @@
+// Copyright 2026 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#ifndef EXECUTOR_COMMON_KVM_RISCV64_SYZOS_H
+#define EXECUTOR_COMMON_KVM_RISCV64_SYZOS_H
+
+// This file provides guest code running inside the RISCV64 KVM.
+
+#include <linux/kvm.h>
+
+#include "common_kvm_syzos.h"
+#include "kvm.h"
+
+// Remember these constants must match those in sys/linux/dev_kvm_riscv64.txt.
+typedef enum {
+ SYZOS_API_UEXIT = 0,
+ SYZOS_API_CODE = 10,
+ SYZOS_API_CSRR = 100,
+ SYZOS_API_CSRW = 101,
+ SYZOS_API_STOP, // Must be the last one
+} syzos_api_id;
+
+struct api_call_header {
+ uint64 call;
+ uint64 size;
+};
+
+struct api_call_code {
+ struct api_call_header header;
+ uint32 insns[];
+};
+
+struct api_call_1 {
+ struct api_call_header header;
+ uint64 arg;
+};
+
+struct api_call_2 {
+ struct api_call_header header;
+ uint64 args[2];
+};
+
+GUEST_CODE static void guest_uexit(uint64 exit_code);
+GUEST_CODE static void guest_execute_code(uint32* insns, uint64 size);
+GUEST_CODE static void guest_handle_csrr(uint32 csr);
+GUEST_CODE static void guest_handle_csrw(uint32 csr, uint64 val);
+
+// Main guest function that performs necessary setup and passes the control to the user-provided
+// payload.
+// The inner loop uses a complex if-statement, because Clang is eager to insert a jump table into
+// a switch statement.
+// We add single-line comments to justify having the compound statements below.
+__attribute__((used))
+GUEST_CODE static void
+guest_main(uint64 size, uint64 cpu)
+{
+ uint64 addr = RISCV64_ADDR_USER_CODE + cpu * 0x1000;
+
+ while (size >= sizeof(struct api_call_header)) {
+ struct api_call_header* cmd = (struct api_call_header*)addr;
+ if (cmd->call >= SYZOS_API_STOP)
+ return;
+ if (cmd->size > size)
+ return;
+ volatile uint64 call = cmd->call;
+ if (call == SYZOS_API_UEXIT) {
+ // Issue a user exit.
+ struct api_call_1* ccmd = (struct api_call_1*)cmd;
+ guest_uexit(ccmd->arg);
+ } else if (call == SYZOS_API_CODE) {
+ // Execute an instruction blob.
+ struct api_call_code* ccmd = (struct api_call_code*)cmd;
+ guest_execute_code(ccmd->insns, cmd->size - sizeof(struct api_call_header));
+ } else if (call == SYZOS_API_CSRR) {
+ // Execute a csrr instruction.
+ struct api_call_1* ccmd = (struct api_call_1*)cmd;
+ guest_handle_csrr(ccmd->arg);
+ } else if (call == SYZOS_API_CSRW) {
+ // Execute a csrw instruction.
+ struct api_call_2* ccmd = (struct api_call_2*)cmd;
+ guest_handle_csrw(ccmd->args[0], ccmd->args[1]);
+ }
+ addr += cmd->size;
+ size -= cmd->size;
+ };
+ guest_uexit((uint64)-1);
+}
+
+// Perform a userspace exit that can be handled by the host.
+// The host returns from ioctl(KVM_RUN) with kvm_run.exit_reason=KVM_EXIT_MMIO,
+// and can handle the call depending on the data passed as exit code.
+GUEST_CODE static noinline void guest_uexit(uint64 exit_code)
+{
+ volatile uint64* ptr = (volatile uint64*)RISCV64_ADDR_UEXIT;
+ *ptr = exit_code;
+}
+
+GUEST_CODE static noinline void guest_execute_code(uint32* insns, uint64 size)
+{
+ asm volatile("fence.i" ::
+ : "memory");
+ volatile void (*fn)() = (volatile void (*)())insns;
+ fn();
+}
+
+// Host sets CORE_TP to contain the virtual CPU id.
+GUEST_CODE static uint32 get_cpu_id()
+{
+ uint64 val = 0;
+ asm volatile("mv %0, tp"
+ : "=r"(val));
+ return (uint32)val;
+}
+
+#define MAX_CACHE_LINE_SIZE 256
+#define RISCV_OPCODE_SYSTEM 0x73
+#define FUNCT3_CSRRW 0x1
+#define FUNCT3_CSRRS 0x2
+#define REG_ZERO 0
+#define REG_A0 10
+#define ENCODE_CSR_INSN(csr, rs1, funct3, rd) \
+ (((csr) << 20) | ((rs1) << 15) | ((funct3) << 12) | ((rd) << 7) | RISCV_OPCODE_SYSTEM)
+
+GUEST_CODE static noinline void
+guest_handle_csrr(uint32 csr)
+{
+ uint32 cpu_id = get_cpu_id();
+ // Make sure CPUs use different cache lines for scratch code.
+ uint32* insn = (uint32*)((uint64)RISCV64_ADDR_SCRATCH_CODE + cpu_id * MAX_CACHE_LINE_SIZE);
+ // insn[0] - csrr a0, csr
+ // insn[1] - ret
+ insn[0] = ENCODE_CSR_INSN(csr, REG_ZERO, FUNCT3_CSRRS, REG_A0);
+ insn[1] = 0x00008067;
+ asm volatile("fence.i" ::
+ : "memory");
+ asm volatile(
+ "jalr ra, 0(%0)"
+ :
+ : "r"(insn)
+ : "ra", "a0", "memory");
+}
+
+GUEST_CODE static noinline void
+guest_handle_csrw(uint32 csr, uint64 val)
+{
+ uint32 cpu_id = get_cpu_id();
+ // Make sure CPUs use different cache lines for scratch code.
+ uint32* insn = (uint32*)((uint64)RISCV64_ADDR_SCRATCH_CODE + cpu_id * MAX_CACHE_LINE_SIZE);
+ // insn[0] - csrw csr, a0
+ // insn[1] - ret
+ insn[0] = ENCODE_CSR_INSN(csr, REG_A0, FUNCT3_CSRRW, REG_ZERO);
+ insn[1] = 0x00008067;
+ asm volatile("fence.i" ::
+ : "memory");
+ asm volatile(
+ "mv a0, %0\n"
+ "jalr ra, 0(%1)"
+ :
+ : "r"(val), "r"(insn)
+ : "a0", "ra", "memory");
+}
+
+// The exception vector table setup and SBI invocation here follow the
+// implementation in Linux kselftest KVM RISC-V tests.
+// See https://elixir.bootlin.com/linux/v6.19-rc5/source/tools/testing/selftests/kvm/lib/riscv/processor.c#L337 .
+#define KVM_RISCV_SBI_EXT 0x08FFFFFF
+#define KVM_RISCV_SBI_UNEXP 1
+
+struct sbiret {
+ long error;
+ long value;
+};
+
+GUEST_CODE static inline struct sbiret
+sbi_ecall(unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5,
+ int fid, int ext)
+{
+ struct sbiret ret;
+
+ register unsigned long a0 asm("a0") = arg0;
+ register unsigned long a1 asm("a1") = arg1;
+ register unsigned long a2 asm("a2") = arg2;
+ register unsigned long a3 asm("a3") = arg3;
+ register unsigned long a4 asm("a4") = arg4;
+ register unsigned long a5 asm("a5") = arg5;
+ register unsigned long a6 asm("a6") = fid;
+ register unsigned long a7 asm("a7") = ext;
+ asm volatile("ecall"
+ : "+r"(a0), "+r"(a1)
+ : "r"(a2), "r"(a3), "r"(a4), "r"(a5), "r"(a6), "r"(a7)
+ : "memory");
+ ret.error = a0;
+ ret.value = a1;
+
+ return ret;
+}
+
+GUEST_CODE __attribute__((used)) __attribute((__aligned__(16))) static void
+guest_unexp_trap(void)
+{
+ sbi_ecall(0, 0, 0, 0, 0, 0,
+ KVM_RISCV_SBI_UNEXP,
+ KVM_RISCV_SBI_EXT);
+}
+
+#endif // EXECUTOR_COMMON_KVM_RISCV64_SYZOS_H \ No newline at end of file
diff --git a/executor/kvm.h b/executor/kvm.h
index 227967fea..705d792b2 100644
--- a/executor/kvm.h
+++ b/executor/kvm.h
@@ -546,4 +546,26 @@
#endif // ARM64 SYZOS definitions
+// RISCV64 SYZOS definitions.
+#if GOARCH_riscv64
+// Core Local INTerruptor address.
+#define RISCV64_ADDR_CLINT 0x02000000
+// Platform Level Interrupt Controller address.
+#define RISCV64_ADDR_PLIC 0x0c000000
+// Write to this page to trigger a page fault and stop KVM_RUN.
+#define RISCV64_ADDR_EXIT 0x40000000
+// Two writable pages with KVM_MEM_LOG_DIRTY_PAGES explicitly set.
+#define RISCV64_ADDR_DIRTY_PAGES 0x40001000
+#define RISCV64_ADDR_USER_CODE 0x80000000
+// Location of the SYZOS guest code. Name shared with x86 SYZOS.
+#define SYZOS_ADDR_EXECUTOR_CODE 0x80008000
+#define RISCV64_ADDR_SCRATCH_CODE 0x80010000
+#define RISCV64_ADDR_STACK_BASE 0x80020000
+#define RISCV64_ADDR_EXCEPTION_VECTOR 0x00001000
+
+// Dedicated address within the exit page for the uexit command.
+#define RISCV64_ADDR_UEXIT (RISCV64_ADDR_EXIT + 256)
+
+#endif // RISCV64 SYZOS definitions
+
#endif // EXECUTOR_KVM_H