diff options
Diffstat (limited to 'executor')
| -rw-r--r-- | executor/common_kvm_amd64.h | 46 |
1 file changed, 38 insertions(+), 8 deletions(-)
diff --git a/executor/common_kvm_amd64.h b/executor/common_kvm_amd64.h index cb9499a81..b8e86127e 100644 --- a/executor/common_kvm_amd64.h +++ b/executor/common_kvm_amd64.h @@ -243,6 +243,8 @@ static const struct mem_region syzos_mem_regions[] = { #endif #if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu +#define SYZOS_REGION_COUNT (sizeof(syzos_mem_regions) / sizeof(syzos_mem_regions[0])) + struct kvm_syz_vm { int vmfd; int next_cpu_id; @@ -252,9 +254,32 @@ struct kvm_syz_vm { void* gpa0_mem; void* pt_pool_mem; void* globals_mem; + void* region_base[SYZOS_REGION_COUNT]; }; #endif +#if SYZ_EXECUTOR || __NR_syz_kvm_setup_syzos_vm || __NR_syz_kvm_setup_cpu || __NR_syz_kvm_add_vcpu +static inline void* gpa_to_hva(struct kvm_syz_vm* vm, uint64 gpa) +{ + for (size_t i = 0; i < SYZOS_REGION_COUNT; i++) { + const struct mem_region* r = &syzos_mem_regions[i]; + // Skip regions that are not backed by host memory. + if (r->flags & MEM_REGION_FLAG_NO_HOST_MEM) + continue; + + // Size of this region may be unknown yet. Also, it is the last in + // syzos_mem_regions[], so we can safely return if we reach it. 
+ if (r->gpa == X86_SYZOS_ADDR_UNUSED) + break; + + size_t region_size = r->pages * KVM_PAGE_SIZE; + if (gpa >= r->gpa && gpa < r->gpa + region_size) + return (void*)((char*)vm->region_base[i] + (gpa - r->gpa)); + } + return NULL; +} +#endif + #if SYZ_EXECUTOR || __NR_syz_kvm_add_vcpu #define X86_NUM_IDT_ENTRIES 256 @@ -263,7 +288,7 @@ static void syzos_setup_idt(struct kvm_syz_vm* vm, struct kvm_sregs* sregs) sregs->idt.base = X86_SYZOS_ADDR_VAR_IDT; sregs->idt.limit = (X86_NUM_IDT_ENTRIES * sizeof(struct idt_entry_64)) - 1; volatile struct idt_entry_64* idt = - (volatile struct idt_entry_64*)((uint64)vm->host_mem + sregs->idt.base); + (volatile struct idt_entry_64*)(uint64)gpa_to_hva(vm, sregs->idt.base); uint64 handler_addr = executor_fn_guest_addr(dummy_null_handler); for (int i = 0; i < X86_NUM_IDT_ENTRIES; i++) { idt[i].offset_low = (uint16)(handler_addr & 0xFFFF); @@ -374,7 +399,7 @@ static void setup_pg_table(struct kvm_syz_vm* vm) memset(vm->gpa0_mem, 0, 5 * KVM_PAGE_SIZE); // Map all the regions defined in setup_vm() - for (size_t i = 0; i < sizeof(syzos_mem_regions) / sizeof(syzos_mem_regions[0]); i++) { + for (size_t i = 0; i < SYZOS_REGION_COUNT; i++) { int pages = syzos_mem_regions[i].pages; if (syzos_mem_regions[i].flags & MEM_REGION_FLAG_REMAINING) { if (total < 0) @@ -461,7 +486,7 @@ static void setup_gdt_ldt_pg(struct kvm_syz_vm* vm, int cpufd, int cpu_id) sregs.gdt.base = X86_SYZOS_ADDR_GDT; sregs.gdt.limit = 5 * sizeof(struct gdt_entry) - 1; - struct gdt_entry* gdt = (struct gdt_entry*)((uint64)vm->host_mem + sregs.gdt.base); + struct gdt_entry* gdt = (struct gdt_entry*)(uint64)gpa_to_hva(vm, sregs.gdt.base); struct kvm_segment seg_cs64; memset(&seg_cs64, 0, sizeof(seg_cs64)); @@ -506,7 +531,7 @@ static void setup_gdt_ldt_pg(struct kvm_syz_vm* vm, int cpufd, int cpu_id) // The L1 TSS memory is at (vm->host_mem + X86_SYZOS_ADDR_VAR_TSS) volatile uint8* l1_tss = - (volatile uint8*)((uint64)vm->host_mem + X86_SYZOS_ADDR_VAR_TSS); + (volatile uint8*)(uint64)gpa_to_hva(vm, X86_SYZOS_ADDR_VAR_TSS); // Zero out the TSS (104 bytes for 64-bit) memset((void*)l1_tss, 0, 104); @@ -529,7 +554,7 @@ static void setup_gdt_ldt_pg(struct kvm_syz_vm* vm, int cpufd, int cpu_id) sregs.efer |= X86_EFER_SVME; // Zero out the HSAVE area for AMD. - void* hsave_host = (void*)((uint64)vm->host_mem + X86_SYZOS_ADDR_VM_ARCH_SPECIFIC(cpu_id)); + void* hsave_host = (void*)(uint64)gpa_to_hva(vm, X86_SYZOS_ADDR_VM_ARCH_SPECIFIC(cpu_id)); memset(hsave_host, 0, KVM_PAGE_SIZE); } @@ -1187,16 +1212,20 @@ static void setup_vm(int vmfd, struct kvm_syz_vm* vm) int slot = 0; // Slot numbers do not matter, they just have to be different. struct syzos_boot_args* boot_args = NULL; - for (size_t i = 0; i < sizeof(syzos_mem_regions) / sizeof(syzos_mem_regions[0]); i++) { + for (size_t i = 0; i < SYZOS_REGION_COUNT; i++) { const struct mem_region* r = &syzos_mem_regions[i]; - if (r->flags & MEM_REGION_FLAG_NO_HOST_MEM) + if (r->flags & MEM_REGION_FLAG_NO_HOST_MEM) { + vm->region_base[i] = NULL; continue; + } size_t pages = r->pages; if (r->flags & MEM_REGION_FLAG_REMAINING) pages = allocator.size / KVM_PAGE_SIZE; struct addr_size next = alloc_guest_mem(&allocator, pages * KVM_PAGE_SIZE); + vm->region_base[i] = next.addr; + uint32 flags = 0; if (r->flags & MEM_REGION_FLAG_DIRTY_LOG) flags |= KVM_MEM_LOG_DIRTY_PAGES; @@ -1213,7 +1242,7 @@ static void setup_vm(int vmfd, struct kvm_syz_vm* vm) if (r->gpa == X86_SYZOS_ADDR_BOOT_ARGS) { boot_args = (struct syzos_boot_args*)next.addr; - boot_args->region_count = sizeof(syzos_mem_regions) / sizeof(syzos_mem_regions[0]); + boot_args->region_count = SYZOS_REGION_COUNT; for (size_t k = 0; k < boot_args->region_count; k++) boot_args->regions[k] = syzos_mem_regions[k]; } @@ -1239,6 +1268,7 @@ static long syz_kvm_setup_syzos_vm(volatile long a0, volatile long a1) struct kvm_syz_vm* ret = (struct kvm_syz_vm*)host_mem; ret->host_mem = (void*)((uint64)host_mem + KVM_PAGE_SIZE); ret->total_pages = KVM_GUEST_PAGES - 1; + setup_vm(vmfd, ret); ret->vmfd = vmfd; ret->next_cpu_id = 0;
