aboutsummaryrefslogtreecommitdiffstats
path: root/executor/common_kvm_arm64_syzos.h
diff options
context:
space:
mode:
author    Alexander Potapenko <glider@google.com>  2024-08-08 17:15:28 +0200
committer Alexander Potapenko <glider@google.com>  2024-09-03 12:16:36 +0000
commita06f1e2de790d7f11a3ba8c4177dcdab7c69850c (patch)
tree5c40f0cd209dd96bcee8a492cd95ce142cf29225 /executor/common_kvm_arm64_syzos.h
parentb2cc5342decd0a59ad80773c34d903e41cbb9ee2 (diff)
executor: arm64: sys/linux: add SYZOS_API_IRQ_SETUP
Implement basic IRQ controller setup for VMs with a single CPU. SYZOS_API_IRQ_SETUP sets up the VGICv3 distributor/redistributor and enables the specified number of SPIs starting from 32. The default IRQ handler is set up to perform a uexit(-2).
Diffstat (limited to 'executor/common_kvm_arm64_syzos.h')
-rw-r--r--  executor/common_kvm_arm64_syzos.h | 459
1 file changed, 459 insertions, 0 deletions
diff --git a/executor/common_kvm_arm64_syzos.h b/executor/common_kvm_arm64_syzos.h
index 9cf0aeb43..183e35342 100644
--- a/executor/common_kvm_arm64_syzos.h
+++ b/executor/common_kvm_arm64_syzos.h
@@ -18,6 +18,7 @@ typedef enum {
SYZOS_API_MSR,
SYZOS_API_SMC,
SYZOS_API_HVC,
+ SYZOS_API_IRQ_SETUP,
SYZOS_API_STOP, // Must be the last one
} syzos_api_id;
@@ -47,11 +48,24 @@ struct api_call_smccc {
uint64 params[5];
};
+// Guest payload of the SYZOS_API_IRQ_SETUP call: how many vCPUs to
+// initialize and how many SPIs (starting at INTID 32) to enable.
+struct api_call_irq_setup {
+ struct api_call_header header;
+ uint32 nr_cpus;
+ uint32 nr_spis;
+};
+
static void guest_uexit(uint64 exit_code);
static void guest_execute_code(uint32* insns, uint64 size);
static void guest_handle_msr(uint64 reg, uint64 val);
static void guest_handle_smc(struct api_call_smccc* cmd);
static void guest_handle_hvc(struct api_call_smccc* cmd);
+static void guest_handle_irq_setup(struct api_call_irq_setup* cmd);
+
+// Exit codes passed to guest_uexit() to tell the host why the guest exited.
+typedef enum {
+ UEXIT_END = (uint64)-1, // Normal end of the guest payload.
+ UEXIT_IRQ = (uint64)-2, // An interrupt was taken (see guest_irq_handler()).
+ UEXIT_ASSERT = (uint64)-3, // A GUEST_ASSERT() condition failed.
+} uexit_code;
// Main guest function that performs necessary setup and passes the control to the user-provided
// payload.
@@ -89,6 +103,10 @@ GUEST_CODE static void guest_main(uint64 size)
guest_handle_hvc((struct api_call_smccc*)cmd);
break;
}
+ case SYZOS_API_IRQ_SETUP: {
+ guest_handle_irq_setup((struct api_call_irq_setup*)cmd);
+ break;
+ }
}
addr += cmd->size;
size -= cmd->size;
@@ -185,3 +203,444 @@ GUEST_CODE static void guest_handle_hvc(struct api_call_smccc* cmd)
"x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17",
"memory");
}
+
+// VGICv3 setup and IRQ handling code below.
+// This code is based on the "Arm Generic Interrupt Controller (GIC) Architecture Specification.
+// GIC architecture version 3 and version 4" doc (https://developer.arm.com/documentation/ihi0069/latest/)
+// and KVM selftests in the Linux kernel.
+
+// GICv3 Distributor registers (byte offsets from ARM64_ADDR_GICD_BASE).
+#define GICD_CTLR 0x0000
+#define GICD_IGROUPR 0x0080
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+
+// Default priority byte (0xa0) replicated for the 4 interrupts covered by
+// one GICD_IPRIORITYR word.
+#define GICD_INT_DEF_PRI_X4 0xa0a0a0a0
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+
+// Register Write Pending: set while a distributor register write is in flight.
+#define GICD_CTLR_RWP (1U << 31)
+
+// GICv3 Redistributor registers (byte offsets from the per-CPU RD/SGI frames).
+// Several offsets coincide with their distributor counterparts.
+#define GICR_CTLR_RWP (1UL << 3)
+#define GICR_CTLR GICD_CTLR
+#define GICR_WAKER 0x0014
+#define GICR_IGROUPR0 GICD_IGROUPR
+#define GICR_ICENABLER0 GICD_ICENABLER
+#define GICR_ICACTIVER0 GICD_ICACTIVER
+#define GICR_IPRIORITYR0 GICD_IPRIORITYR
+
+// CPU interface (ICC_*) constants.
+#define ICC_SRE_EL1_SRE (1U << 0)
+#define ICC_PMR_DEF_PRIO 0xff
+#define ICC_IGRPEN1_EL1_ENABLE (1U << 0)
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+// When building with tools/syz-old-env, GCC doesn't recognize the names of ICC registers.
+// Replace them with generic S3_* names until we get a newer toolchain.
+#define ICC_SRE_EL1 "S3_0_C12_C12_5"
+#define ICC_PMR_EL1 "S3_0_C4_C6_0"
+#define ICC_IGRPEN1_EL1 "S3_0_C12_C12_7"
+#define ICC_IAR0_EL1 "S3_0_C12_C8_0"
+#define ICC_IAR1_EL1 "S3_0_C12_C12_0"
+#define ICC_EOIR0_EL1 "S3_0_C12_C8_1"
+#define ICC_EOIR1_EL1 "S3_0_C12_C12_1"
+#define ICC_DIR_EL1 "S3_0_C12_C11_1"
+
+// Raw 32-bit MMIO store to @addr: no barriers, no endianness conversion
+// (see writel()/writel_relaxed() below). The "rZ" constraint lets the
+// compiler use XZR when @val is zero.
+static GUEST_CODE __always_inline void __raw_writel(uint32 val, uint64 addr)
+{
+ asm volatile("str %w0, [%1]"
+ :
+ : "rZ"(val), "r"(addr));
+}
+
+// Raw 32-bit MMIO load from @addr: no barriers, no endianness conversion
+// (see readl()/readl_relaxed() below).
+static GUEST_CODE __always_inline uint32 __raw_readl(uint64 addr)
+{
+ uint32 val;
+ asm volatile("ldr %w0, [%1]"
+ : "=r"(val)
+ : "r"(addr));
+ return val;
+}
+// Relaxed accessors: little-endian conversion only, no ordering guarantees.
+#define writel_relaxed(v, c) ((void)__raw_writel((uint32)cpu_to_le32(v), (c)))
+#define readl_relaxed(c) ({ uint32 __r = le32_to_cpu(( __le32)__raw_readl(c)); __r; })
+
+// Full-system data memory barrier.
+#define dmb() asm volatile("dmb sy" \
+ : \
+ : \
+ : "memory")
+
+// Ordered accessors: writel() orders preceding accesses before the store;
+// readl() orders the load before subsequent accesses.
+#define writel(v, c) ({ dmb(); __raw_writel(v, c); })
+#define readl(c) ({ uint32 __v = __raw_readl(c); dmb(); __v; })
+
+// TODO(glider): may want to return extra data to the host.
+// If @val is zero, report a guest-side assertion failure to the host via
+// guest_uexit(UEXIT_ASSERT).
+#define GUEST_ASSERT(val) \
+ do { \
+ if (!(val)) \
+ guest_uexit(UEXIT_ASSERT); \
+ } while (0)
+
+// Helper to implement guest_udelay().
+// Returns the current value of the virtual counter (CNTVCT_EL0).
+GUEST_CODE uint64 read_cntvct(void)
+{
+ uint64 val;
+ asm volatile("mrs %0, cntvct_el0"
+ : "=r"(val));
+ return val;
+}
+
+// Wait for roughly @us microseconds by busy-polling the virtual counter.
+GUEST_CODE static void guest_udelay(uint32 us)
+{
+ uint64 ticks_per_second = 0;
+ // Have to read the frequency every time, since we don't have static storage.
+ asm volatile("mrs %0, cntfrq_el0"
+ : "=r"(ticks_per_second));
+
+ uint64 start = read_cntvct();
+
+ // Target counter value for the desired delay.
+ uint64 target = start + (us * ticks_per_second) / 1000000;
+
+ // Busy-wait; no WFE/WFI, so this burns CPU until the target is reached.
+ while (read_cntvct() < target) {
+ }
+}
+
+// Spin for at most one second as long as the register value has bits from mask.
+// 100000 iterations x 10us == ~1s; on timeout GUEST_ASSERT() reports
+// UEXIT_ASSERT to the host.
+GUEST_CODE static void spin_while_readl(uint64 reg, uint32 mask)
+{
+ volatile unsigned int count = 100000;
+ while (readl(reg) & mask) {
+ GUEST_ASSERT(count--);
+ guest_udelay(10);
+ }
+}
+
+// Wait for the Register Write Pending bit on GICD_CTLR.
+// Returns once the distributor has committed previously issued writes.
+// Note: use (void) -- in C an empty parameter list declares a function
+// with unspecified parameters, not a zero-argument prototype.
+GUEST_CODE static void gicd_wait_for_rwp(void)
+{
+ spin_while_readl(ARM64_ADDR_GICD_BASE + GICD_CTLR, GICD_CTLR_RWP);
+}
+
+#define SZ_64K 0x00010000
+// Base address of @cpu's redistributor: each CPU owns two consecutive
+// 64KiB frames (RD frame followed by the SGI frame).
+GUEST_CODE static uint64 gicr_base_cpu(uint32 cpu)
+{
+ return ARM64_ADDR_GICR_BASE + cpu * SZ_64K * 2;
+}
+
+// Base address of @cpu's SGI frame (second 64KiB frame of the redistributor).
+GUEST_CODE static uint64 sgi_base_cpu(uint32 cpu)
+{
+ return gicr_base_cpu(cpu) + SZ_64K;
+}
+
+// Wait for the Register Write Pending bit on GICR_CTLR.
+// Returns once @cpu's redistributor has committed previously issued writes.
+GUEST_CODE static void gicr_wait_for_rwp(uint32 cpu)
+{
+ spin_while_readl(gicr_base_cpu(cpu) + GICR_CTLR, GICR_CTLR_RWP);
+}
+
+// Set up the distributor part: disable it, configure @nr_spis SPIs
+// (INTIDs 32..32+nr_spis) as disabled non-secure Group-1, then re-enable
+// the distributor with affinity routing.
+GUEST_CODE static void gicv3_dist_init(int nr_spis)
+{
+ // Disable the distributor.
+ writel(0, ARM64_ADDR_GICD_BASE + GICD_CTLR);
+ gicd_wait_for_rwp();
+
+ // Mark all the SPI interrupts as non-secure Group-1. Also, deactivate and disable them.
+ // Each IGROUPR/ICACTIVER/ICENABLER word covers 32 INTIDs, hence the
+ // byte offset i / 8 (= (i / 32) * 4).
+ for (int i = 32; i < nr_spis + 32; i += 32) {
+ writel(~0, ARM64_ADDR_GICD_BASE + GICD_IGROUPR + i / 8);
+ writel(~0, ARM64_ADDR_GICD_BASE + GICD_ICACTIVER + i / 8);
+ writel(~0, ARM64_ADDR_GICD_BASE + GICD_ICENABLER + i / 8);
+ }
+
+ // Set a default priority for all the SPIs.
+ // One priority byte per INTID, four per GICD_IPRIORITYR word.
+ for (int i = 32; i < nr_spis + 32; i += 4) {
+ writel(GICD_INT_DEF_PRI_X4,
+ ARM64_ADDR_GICD_BASE + GICD_IPRIORITYR + i);
+ }
+
+ // Wait for the settings to sync-in.
+ gicd_wait_for_rwp();
+
+ // Finally, enable the distributor globally with Affinity Routing Enable, Non-Secure.
+ writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, ARM64_ADDR_GICD_BASE + GICD_CTLR);
+ gicd_wait_for_rwp();
+}
+
+// Wake up the redistributor of @cpu by clearing GICR_WAKER.ProcessorSleep
+// and waiting for ChildrenAsleep to drop.
+// https://developer.arm.com/documentation/198123/0302/Configuring-the-Arm-GIC
+GUEST_CODE void gicv3_enable_redist(uint32 cpu)
+{
+ uint64 redist_base_cpu = gicr_base_cpu(cpu);
+ uint32 val = readl(redist_base_cpu + GICR_WAKER);
+
+ val &= ~GICR_WAKER_ProcessorSleep;
+ // Write back to this CPU's own redistributor frame (the original code
+ // wrote to ARM64_ADDR_GICR_BASE, i.e. always CPU 0's frame, which is
+ // wrong for cpu > 0 and leaves redist_base_cpu only half-used).
+ writel(val, redist_base_cpu + GICR_WAKER);
+ // Wait until the processor is 'active'.
+ spin_while_readl(redist_base_cpu + GICR_WAKER, GICR_WAKER_ChildrenAsleep);
+}
+
+// Initialize @cpu's redistributor (SGI/PPI configuration) and its CPU
+// interface (ICC_* system registers).
+GUEST_CODE void gicv3_cpu_init(uint32 cpu)
+{
+ uint64 sgi_base = sgi_base_cpu(cpu);
+
+ // It is important that software performs these steps before configuring
+ // the CPU interface, otherwise behavior can be UNPREDICTABLE.
+ gicv3_enable_redist(cpu);
+
+ // Mark all the SGI and PPI interrupts as non-secure Group-1. Also, deactivate and disable them.
+ writel(~0, sgi_base + GICR_IGROUPR0);
+ writel(~0, sgi_base + GICR_ICACTIVER0);
+ writel(~0, sgi_base + GICR_ICENABLER0);
+
+ // Set a default priority for all the SGIs and PPIs.
+ for (int i = 0; i < 32; i += 4) {
+ writel(GICD_INT_DEF_PRI_X4,
+ sgi_base + GICR_IPRIORITYR0 + i);
+ }
+
+ gicr_wait_for_rwp(cpu);
+
+ // Enable the GIC system register (ICC_*) access.
+ // MRS writes its destination register, so the operand must be an output
+ // ("=r"); the original input constraint ("r") discarded the value read
+ // from ICC_SRE_EL1 and left icc_sre_el1 at 0.
+ uint32 icc_sre_el1 = 0;
+ asm volatile("mrs %0, " ICC_SRE_EL1
+ : "=r"(icc_sre_el1));
+ icc_sre_el1 |= ICC_SRE_EL1_SRE;
+ asm volatile("msr " ICC_SRE_EL1 ", %0"
+ :
+ : "r"(icc_sre_el1));
+
+ // Set a default priority threshold (0xff lets interrupts of any priority through).
+ uint32 value = ICC_PMR_DEF_PRIO;
+ asm volatile("msr " ICC_PMR_EL1 ", %0"
+ :
+ : "r"(value));
+
+ // Enable non-secure Group-1 interrupts.
+ value = ICC_IGRPEN1_EL1_ENABLE;
+ asm volatile("msr " ICC_IGRPEN1_EL1 ", %0"
+ :
+ : "r"(value));
+}
+
+// GICv3 reserves interrupts 32-1019 for SPI.
+#define VGICV3_MIN_SPI 32
+#define VGICV3_MAX_SPI 1019
+
+// Enable the interrupt @intid by setting its bit in GICD_ISENABLER<n>.
+// NOTE(review): for intid < 32 (SGI/PPI) the enable bit arguably belongs in
+// the redistributor's SGI frame, not the distributor; only SPIs are enabled
+// by guest_handle_irq_setup(), so that path is currently unused -- confirm
+// before relying on it.
+// https://developer.arm.com/documentation/ihi0048/b/Programmers--Model/Distributor-register-descriptions/Interrupt-Set-Enable-Registers--GICD-ISENABLERn
+GUEST_CODE void gicv3_irq_enable(uint32 intid)
+{
+ // TODO(glider): support multiple CPUs. E.g. KVM selftests store CPU ID in TPIDR_EL1.
+ uint32 cpu = 0;
+
+ // Use an unsigned literal: "1 << 31" left-shifts into the sign bit of a
+ // signed int, which is undefined behavior in C.
+ writel(1U << (intid % 32), ARM64_ADDR_GICD_BASE + GICD_ISENABLER + (intid / 32) * 4);
+ if ((intid >= VGICV3_MIN_SPI) && (intid <= VGICV3_MAX_SPI))
+ gicd_wait_for_rwp();
+ else
+ gicr_wait_for_rwp(cpu);
+}
+
+// Handle SYZOS_API_IRQ_SETUP: initialize the VGICv3 distributor, the
+// redistributors/CPU interfaces of cmd->nr_cpus CPUs, enable cmd->nr_spis
+// SPIs starting from INTID 32, and install the exception vector table.
+GUEST_CODE static void guest_handle_irq_setup(struct api_call_irq_setup* cmd)
+{
+ // Clamp nr_spis to a sane default if it is out of the SPI range.
+ int nr_spis = cmd->nr_spis;
+ if ((nr_spis > VGICV3_MAX_SPI - VGICV3_MIN_SPI) || (nr_spis < 0))
+ nr_spis = 32;
+ // NOTE(review): nr_cpus is not validated, unlike nr_spis -- a large value
+ // makes the loop below touch redistributor frames of nonexistent CPUs.
+ int nr_cpus = cmd->nr_cpus;
+
+ gicv3_dist_init(nr_spis);
+ for (int i = 0; i < nr_cpus; i++)
+ gicv3_cpu_init(i);
+ for (int i = 0; i < nr_spis; i++)
+ gicv3_irq_enable(VGICV3_MIN_SPI + i);
+ // Set up the vector table (ADR is PC-relative, so this works at the
+ // guest load address) and unmask D/A/I/F via DAIFClr.
+ asm(R"(
+ adr x1, guest_vector_table
+ msr vbar_el1, x1
+ msr daifclr, #0b1111
+ )"
+ :
+ :
+ : "x1");
+}
+
+// Registers saved by one_irq_handler() and received by guest_irq_handler().
+// Layout must match the stores in one_irq_handler(): regs[0..30] = x0..x30,
+// sp = SP before the frame was allocated, pc = ELR_EL1, pstate = SPSR_EL1.
+// Total size: 34 * 8 = 272 = 16 * 17 bytes.
+struct ex_regs {
+ uint64 regs[31];
+ uint64 sp;
+ uint64 pc;
+ uint64 pstate;
+};
+
+// Placeholder function to declare one_irq_handler() inside the assembly blob. We cannot put it
+// into a separate .S file, because syzkaller requires a standalone header for reproducers.
+// one_irq_handler is the low-level IRQ entry: it builds a struct ex_regs
+// frame on the stack, calls guest_irq_handler(), restores state and ERETs.
+// one_irq_handler_fn itself is never called.
+__attribute__((used))
+GUEST_CODE static void
+one_irq_handler_fn()
+{
+ asm volatile(
+ R"(.global one_irq_handler
+ one_irq_handler:
+ # Allocate 34 * uint64 for struct ex_regs.
+ add sp, sp, #-16 * 17
+ # Store registers x0-x29 on the stack.
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+ stp x12, x13, [sp, #16 * 6]
+ stp x14, x15, [sp, #16 * 7]
+ stp x16, x17, [sp, #16 * 8]
+ stp x18, x19, [sp, #16 * 9]
+ stp x20, x21, [sp, #16 * 10]
+ stp x22, x23, [sp, #16 * 11]
+ stp x24, x25, [sp, #16 * 12]
+ stp x26, x27, [sp, #16 * 13]
+ stp x28, x29, [sp, #16 * 14]
+
+ add x1, sp, #16 * 17
+ # Store x30 and SP (before allocating ex_regs).
+ stp x30, x1, [sp, #16 * 15]
+
+ # ELR_EL1 holds the PC to return to.
+ mrs x1, elr_el1
+ # SPSR_EL1 is the saved PSTATE.
+ mrs x2, spsr_el1
+ # Also store them to ex_regs.
+ stp x1, x2, [sp, #16 * 16]
+
+ # Call guest_irq_handler(ex_regs).
+ mov x0, sp
+ bl guest_irq_handler
+
+ # Restore ELR_EL1 and SPSR_EL1.
+ ldp x1, x2, [sp, #16 * 16]
+ msr elr_el1, x1
+ msr spsr_el1, x2
+
+ # Restore the GP registers x0-x30 (ignoring SP).
+ ldp x30, xzr, [sp, #16 * 15]
+ ldp x28, x29, [sp, #16 * 14]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x22, x23, [sp, #16 * 11]
+ ldp x20, x21, [sp, #16 * 10]
+ ldp x18, x19, [sp, #16 * 9]
+ ldp x16, x17, [sp, #16 * 8]
+ ldp x14, x15, [sp, #16 * 7]
+ ldp x12, x13, [sp, #16 * 6]
+ ldp x10, x11, [sp, #16 * 5]
+ ldp x8, x9, [sp, #16 * 4]
+ ldp x6, x7, [sp, #16 * 3]
+ ldp x4, x5, [sp, #16 * 2]
+ ldp x2, x3, [sp, #16 * 1]
+ ldp x0, x1, [sp, #16 * 0]
+
+ add sp, sp, #16 * 17
+
+ # Use ERET to exit from an exception.
+ eret)"
+ :
+ :
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25",
+ "x26", "x27", "x28", "x29", "x30", "memory");
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+// C-level IRQ handler, called from one_irq_handler() with the saved register
+// frame in @regs (currently unused). Acknowledges the interrupt, notifies the
+// host via uexit, then signals EOI and deactivates the interrupt.
+// 0x3ff is the "spurious" INTID 1023 returned by the IARs when no interrupt
+// is pending for that group.
+__attribute__((used))
+GUEST_CODE static void
+guest_irq_handler(struct ex_regs* regs)
+{
+ uint32 iar0, iar1, irq_num = 0;
+ // Acknowledge the interrupt by reading the IAR.
+ asm volatile("mrs %0, " ICC_IAR0_EL1
+ : "=r"(iar0));
+ asm volatile("mrs %0, " ICC_IAR1_EL1
+ : "=r"(iar1));
+ if (iar0 != 0x3ff) {
+ irq_num = iar0 & 0x3FF;
+ } else if (iar1 != 0x3ff) {
+ irq_num = iar1 & 0x3FF;
+ } else {
+ return;
+ }
+
+ // Handle the interrupt by doing a uexit.
+ // NOTE(review): this happens before EOI; presumably the host resumes the
+ // vCPU after the uexit so the EOI/deactivation below still runs -- confirm
+ // against guest_uexit()'s implementation.
+ // TODO(glider): do something more interesting here.
+ guest_uexit(UEXIT_IRQ);
+
+ // Signal End of Interrupt (EOI) by writing back to the EOIR.
+ if (iar0 != 0x3ff) {
+ asm volatile("msr " ICC_EOIR0_EL1 ", %0"
+ :
+ : "r"(irq_num));
+ } else {
+ asm volatile("msr " ICC_EOIR1_EL1 ", %0"
+ :
+ : "r"(irq_num));
+ }
+ // Deactivate the interrupt.
+ asm volatile("msr " ICC_DIR_EL1 ", %0"
+ :
+ : "r"(irq_num));
+}
+#ifdef __cplusplus
+}
+#endif
+
+// Default IRQ handler. Each AArch64 vector table entry is 0x80 bytes long,
+// hence the .balign.
+#define IRQ_ENTRY \
+ ".balign 0x80\n" \
+ "b one_irq_handler\n"
+
+// Unused IRQ entry: return from the exception without doing anything.
+#define IRQ_ENTRY_DUMMY \
+ ".balign 0x80\n" \
+ "eret\n"
+
+// clang-format off
+// guest_vector_table_fn() is never used, it is just needed to declare guest_vector_table()
+// inside the assembly blob.
+// The table has 4 groups of 4 entries (Sync/IRQ/FIQ/SError); only the IRQ
+// entry of the "current EL with SPx" group is wired to one_irq_handler.
+__attribute__((used))
+GUEST_CODE static void guest_vector_table_fn()
+{
+ // Exception vector table as explained at
+ // https://developer.arm.com/documentation/100933/0100/AArch64-exception-vector-table.
+ asm volatile(
+ ".global guest_vector_table\n"
+ ".balign 2048\n"
+ "guest_vector_table:\n"
+ // Exception handlers for current EL with SP0.
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+
+ // Exception handlers for current EL with SPx.
+ IRQ_ENTRY_DUMMY
+ // Only handle IRQ/vIRQ for now.
+ IRQ_ENTRY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+
+ // Exception handlers for lower EL using AArch64.
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+
+ // Exception handlers for lower EL using AArch32.
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY
+ IRQ_ENTRY_DUMMY);
+}
+// clang-format on