aboutsummaryrefslogtreecommitdiffstats
path: root/executor
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-12-27 10:56:12 +0100
committerDmitry Vyukov <dvyukov@google.com>2017-12-27 11:15:04 +0100
commitfd3e9f2b9720b9ba730938686b98cff3aa248984 (patch)
tree1bb89f1bc27e4dc16552a7dfff255788a1dd88db /executor
parent34c18f5f43b3b7804b5650b5af67100262557297 (diff)
executor: introduce uint64/32/16/8 types
The "#define uint64_t unsigned long long" was too good to work. With a different toolchain I am getting: cstdint:69:11: error: expected unqualified-id using ::uint64_t; ^ executor/common.h:34:18: note: expanded from macro 'uint64_t'. Do it the proper way: introduce uint64/32/16/8 types and use them. pkg/csource then does s/uint64/uint64_t/ to not clutter code with additional typedefs.
Diffstat (limited to 'executor')
-rw-r--r--executor/common.h20
-rw-r--r--executor/common_akaros.h8
-rw-r--r--executor/common_bsd.h6
-rw-r--r--executor/common_fuchsia.h10
-rw-r--r--executor/common_kvm_amd64.h188
-rw-r--r--executor/common_kvm_arm64.h10
-rw-r--r--executor/common_linux.h64
-rw-r--r--executor/common_windows.h4
-rw-r--r--executor/executor.h298
-rw-r--r--executor/executor_akaros.cc12
-rw-r--r--executor/executor_bsd.cc22
-rw-r--r--executor/executor_fuchsia.cc8
-rw-r--r--executor/executor_linux.cc37
-rw-r--r--executor/executor_linux.h6
-rw-r--r--executor/executor_posix.h6
-rw-r--r--executor/executor_windows.cc8
-rw-r--r--executor/executor_windows.h6
-rw-r--r--executor/test_executor_linux.cc10
18 files changed, 364 insertions, 359 deletions
diff --git a/executor/common.h b/executor/common.h
index 50348023f..d7b0b1187 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -29,9 +29,13 @@
#define exit vsnprintf
#define _exit vsnprintf
-// uint64_t is impossible to printf without using the clumsy and verbose "%" PRId64.
-// So we do the define and use "%lld" to printf uint64_t's.
-#define uint64_t unsigned long long
+// uint64 is impossible to printf without using the clumsy and verbose "%" PRId64.
+// So we define and use uint64. Note: pkg/csource does s/uint64/uint64_t/.
+// Also define uint32/16/8 for consistency.
+typedef unsigned long long uint64;
+typedef unsigned int uint32;
+typedef unsigned short uint16;
+typedef unsigned char uint8;
#if defined(__GNUC__)
#define SYSCALLAPI
@@ -149,7 +153,7 @@ PRINTF static void debug(const char* msg, ...)
#if defined(SYZ_EXECUTOR) || defined(SYZ_USE_CHECKSUMS)
struct csum_inet {
- uint32_t acc;
+ uint32 acc;
};
static void csum_inet_init(struct csum_inet* csum)
@@ -157,23 +161,23 @@ static void csum_inet_init(struct csum_inet* csum)
csum->acc = 0;
}
-static void csum_inet_update(struct csum_inet* csum, const uint8_t* data, size_t length)
+static void csum_inet_update(struct csum_inet* csum, const uint8* data, size_t length)
{
if (length == 0)
return;
size_t i;
for (i = 0; i < length - 1; i += 2)
- csum->acc += *(uint16_t*)&data[i];
+ csum->acc += *(uint16*)&data[i];
if (length & 1)
- csum->acc += (uint16_t)data[length - 1];
+ csum->acc += (uint16)data[length - 1];
while (csum->acc > 0xffff)
csum->acc = (csum->acc & 0xffff) + (csum->acc >> 16);
}
-static uint16_t csum_inet_digest(struct csum_inet* csum)
+static uint16 csum_inet_digest(struct csum_inet* csum)
{
return ~csum->acc;
}
diff --git a/executor/common_akaros.h b/executor/common_akaros.h
index 9ae6538af..c31492ad8 100644
--- a/executor/common_akaros.h
+++ b/executor/common_akaros.h
@@ -99,13 +99,13 @@ static void install_segv_handler()
#endif
#if defined(SYZ_EXECUTOR) || (defined(SYZ_REPEAT) && defined(SYZ_WAIT_REPEAT))
-static uint64_t current_time_ms()
+static uint64 current_time_ms()
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fail("clock_gettime failed");
- return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+ return (uint64)ts.tv_sec * 1000 + (uint64)ts.tv_nsec / 1000000;
}
#endif
@@ -124,7 +124,7 @@ static void use_temporary_dir()
#endif
#if defined(SYZ_EXECUTOR)
-static void sleep_ms(uint64_t ms)
+static void sleep_ms(uint64 ms)
{
usleep(ms * 1000);
}
@@ -220,7 +220,7 @@ void loop()
doexit(0);
}
int status = 0;
- uint64_t start = current_time_ms();
+ uint64 start = current_time_ms();
for (;;) {
int res = waitpid(-1, &status, WNOHANG);
if (res == pid)
diff --git a/executor/common_bsd.h b/executor/common_bsd.h
index ff450f94a..16c919059 100644
--- a/executor/common_bsd.h
+++ b/executor/common_bsd.h
@@ -80,18 +80,18 @@ static void install_segv_handler()
#endif
#if defined(SYZ_EXECUTOR) || (defined(SYZ_REPEAT) && defined(SYZ_WAIT_REPEAT))
-static uint64_t current_time_ms()
+static uint64 current_time_ms()
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fail("clock_gettime failed");
- return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+ return (uint64)ts.tv_sec * 1000 + (uint64)ts.tv_nsec / 1000000;
}
#endif
#if defined(SYZ_EXECUTOR)
-static void sleep_ms(uint64_t ms)
+static void sleep_ms(uint64 ms)
{
usleep(ms * 1000);
}
diff --git a/executor/common_fuchsia.h b/executor/common_fuchsia.h
index fc1470c01..608ce91aa 100644
--- a/executor/common_fuchsia.h
+++ b/executor/common_fuchsia.h
@@ -80,7 +80,7 @@ static void* ex_handler(void* arg)
debug("zx_object_get_child failed: %d\n", status);
continue;
}
- uint32_t bytes_read;
+ uint32 bytes_read;
zx_x86_64_general_regs_t regs;
status = zx_thread_read_state(thread, ZX_THREAD_STATE_REGSET0,
&regs, sizeof(regs), &bytes_read);
@@ -88,7 +88,7 @@ static void* ex_handler(void* arg)
debug("zx_thread_read_state failed: %d/%d (%d)\n",
bytes_read, (int)sizeof(regs), status);
} else {
- regs.rip = (uint64_t)(void*)&segv_handler;
+ regs.rip = (uint64)(void*)&segv_handler;
status = zx_thread_write_state(thread, ZX_THREAD_STATE_REGSET0, &regs, sizeof(regs));
if (status != ZX_OK)
debug("zx_thread_write_state failed: %d\n", status);
@@ -126,18 +126,18 @@ static void install_segv_handler()
#endif
#if defined(SYZ_EXECUTOR) || (defined(SYZ_REPEAT) && defined(SYZ_WAIT_REPEAT))
-static uint64_t current_time_ms()
+static uint64 current_time_ms()
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fail("clock_gettime failed");
- return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+ return (uint64)ts.tv_sec * 1000 + (uint64)ts.tv_nsec / 1000000;
}
#endif
#if defined(SYZ_EXECUTOR)
-static void sleep_ms(uint64_t ms)
+static void sleep_ms(uint64 ms)
{
usleep(ms * 1000);
}
diff --git a/executor/common_kvm_amd64.h b/executor/common_kvm_amd64.h
index 5dce25311..f3fb8f8b6 100644
--- a/executor/common_kvm_amd64.h
+++ b/executor/common_kvm_amd64.h
@@ -72,88 +72,88 @@
#define PDE64_G (1 << 8)
struct tss16 {
- uint16_t prev;
- uint16_t sp0;
- uint16_t ss0;
- uint16_t sp1;
- uint16_t ss1;
- uint16_t sp2;
- uint16_t ss2;
- uint16_t ip;
- uint16_t flags;
- uint16_t ax;
- uint16_t cx;
- uint16_t dx;
- uint16_t bx;
- uint16_t sp;
- uint16_t bp;
- uint16_t si;
- uint16_t di;
- uint16_t es;
- uint16_t cs;
- uint16_t ss;
- uint16_t ds;
- uint16_t ldt;
+ uint16 prev;
+ uint16 sp0;
+ uint16 ss0;
+ uint16 sp1;
+ uint16 ss1;
+ uint16 sp2;
+ uint16 ss2;
+ uint16 ip;
+ uint16 flags;
+ uint16 ax;
+ uint16 cx;
+ uint16 dx;
+ uint16 bx;
+ uint16 sp;
+ uint16 bp;
+ uint16 si;
+ uint16 di;
+ uint16 es;
+ uint16 cs;
+ uint16 ss;
+ uint16 ds;
+ uint16 ldt;
} __attribute__((packed));
struct tss32 {
- uint16_t prev, prevh;
- uint32_t sp0;
- uint16_t ss0, ss0h;
- uint32_t sp1;
- uint16_t ss1, ss1h;
- uint32_t sp2;
- uint16_t ss2, ss2h;
- uint32_t cr3;
- uint32_t ip;
- uint32_t flags;
- uint32_t ax;
- uint32_t cx;
- uint32_t dx;
- uint32_t bx;
- uint32_t sp;
- uint32_t bp;
- uint32_t si;
- uint32_t di;
- uint16_t es, esh;
- uint16_t cs, csh;
- uint16_t ss, ssh;
- uint16_t ds, dsh;
- uint16_t fs, fsh;
- uint16_t gs, gsh;
- uint16_t ldt, ldth;
- uint16_t trace;
- uint16_t io_bitmap;
+ uint16 prev, prevh;
+ uint32 sp0;
+ uint16 ss0, ss0h;
+ uint32 sp1;
+ uint16 ss1, ss1h;
+ uint32 sp2;
+ uint16 ss2, ss2h;
+ uint32 cr3;
+ uint32 ip;
+ uint32 flags;
+ uint32 ax;
+ uint32 cx;
+ uint32 dx;
+ uint32 bx;
+ uint32 sp;
+ uint32 bp;
+ uint32 si;
+ uint32 di;
+ uint16 es, esh;
+ uint16 cs, csh;
+ uint16 ss, ssh;
+ uint16 ds, dsh;
+ uint16 fs, fsh;
+ uint16 gs, gsh;
+ uint16 ldt, ldth;
+ uint16 trace;
+ uint16 io_bitmap;
} __attribute__((packed));
struct tss64 {
- uint32_t reserved0;
- uint64_t rsp[3];
- uint64_t reserved1;
- uint64_t ist[7];
- uint64_t reserved2;
- uint32_t reserved3;
- uint32_t io_bitmap;
+ uint32 reserved0;
+ uint64 rsp[3];
+ uint64 reserved1;
+ uint64 ist[7];
+ uint64 reserved2;
+ uint32 reserved3;
+ uint32 io_bitmap;
} __attribute__((packed));
-static void fill_segment_descriptor(uint64_t* dt, uint64_t* lt, struct kvm_segment* seg)
+static void fill_segment_descriptor(uint64* dt, uint64* lt, struct kvm_segment* seg)
{
- uint16_t index = seg->selector >> 3;
- uint64_t limit = seg->g ? seg->limit >> 12 : seg->limit;
- uint64_t sd = (limit & 0xffff) | (seg->base & 0xffffff) << 16 | (uint64_t)seg->type << 40 | (uint64_t)seg->s << 44 | (uint64_t)seg->dpl << 45 | (uint64_t)seg->present << 47 | (limit & 0xf0000ULL) << 48 | (uint64_t)seg->avl << 52 | (uint64_t)seg->l << 53 | (uint64_t)seg->db << 54 | (uint64_t)seg->g << 55 | (seg->base & 0xff000000ULL) << 56;
+ uint16 index = seg->selector >> 3;
+ uint64 limit = seg->g ? seg->limit >> 12 : seg->limit;
+ uint64 sd = (limit & 0xffff) | (seg->base & 0xffffff) << 16 | (uint64)seg->type << 40 | (uint64)seg->s << 44 | (uint64)seg->dpl << 45 | (uint64)seg->present << 47 | (limit & 0xf0000ULL) << 48 | (uint64)seg->avl << 52 | (uint64)seg->l << 53 | (uint64)seg->db << 54 | (uint64)seg->g << 55 | (seg->base & 0xff000000ULL) << 56;
NONFAILING(dt[index] = sd);
NONFAILING(lt[index] = sd);
}
-static void fill_segment_descriptor_dword(uint64_t* dt, uint64_t* lt, struct kvm_segment* seg)
+static void fill_segment_descriptor_dword(uint64* dt, uint64* lt, struct kvm_segment* seg)
{
fill_segment_descriptor(dt, lt, seg);
- uint16_t index = seg->selector >> 3;
+ uint16 index = seg->selector >> 3;
NONFAILING(dt[index + 1] = 0);
NONFAILING(lt[index + 1] = 0);
}
-static void setup_syscall_msrs(int cpufd, uint16_t sel_cs, uint16_t sel_cs_cpl3)
+static void setup_syscall_msrs(int cpufd, uint16 sel_cs, uint16 sel_cs_cpl3)
{
char buf[sizeof(struct kvm_msrs) + 5 * sizeof(struct kvm_msr_entry)];
memset(buf, 0, sizeof(buf));
@@ -166,7 +166,7 @@ static void setup_syscall_msrs(int cpufd, uint16_t sel_cs, uint16_t sel_cs_cpl3)
msrs->entries[2].index = MSR_IA32_SYSENTER_EIP;
msrs->entries[2].data = ADDR_VAR_SYSEXIT;
msrs->entries[3].index = MSR_IA32_STAR;
- msrs->entries[3].data = ((uint64_t)sel_cs << 32) | ((uint64_t)sel_cs_cpl3 << 48);
+ msrs->entries[3].data = ((uint64)sel_cs << 32) | ((uint64)sel_cs_cpl3 << 48);
msrs->entries[4].index = MSR_IA32_LSTAR;
msrs->entries[4].data = ADDR_VAR_SYSRET;
ioctl(cpufd, KVM_SET_MSRS, msrs);
@@ -176,7 +176,7 @@ static void setup_32bit_idt(struct kvm_sregs* sregs, char* host_mem, uintptr_t g
{
sregs->idt.base = guest_mem + ADDR_VAR_IDT;
sregs->idt.limit = 0x1ff;
- uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
+ uint64* idt = (uint64*)(host_mem + sregs->idt.base);
int i;
for (i = 0; i < 32; i++) {
struct kvm_segment gate;
@@ -229,7 +229,7 @@ static void setup_64bit_idt(struct kvm_sregs* sregs, char* host_mem, uintptr_t g
{
sregs->idt.base = guest_mem + ADDR_VAR_IDT;
sregs->idt.limit = 0x1ff;
- uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
+ uint64* idt = (uint64*)(host_mem + sregs->idt.base);
int i;
for (i = 0; i < 32; i++) {
struct kvm_segment gate;
@@ -255,8 +255,8 @@ struct kvm_text {
};
struct kvm_opt {
- uint64_t typ;
- uint64_t val;
+ uint64 typ;
+ uint64 val;
};
#define KVM_SETUP_PAGING (1 << 0)
@@ -323,14 +323,14 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
regs.rsp = ADDR_STACK0;
sregs.gdt.base = guest_mem + ADDR_GDT;
- sregs.gdt.limit = 256 * sizeof(uint64_t) - 1;
- uint64_t* gdt = (uint64_t*)(host_mem + sregs.gdt.base);
+ sregs.gdt.limit = 256 * sizeof(uint64) - 1;
+ uint64* gdt = (uint64*)(host_mem + sregs.gdt.base);
struct kvm_segment seg_ldt;
seg_ldt.selector = SEL_LDT;
seg_ldt.type = 2;
seg_ldt.base = guest_mem + ADDR_LDT;
- seg_ldt.limit = 256 * sizeof(uint64_t) - 1;
+ seg_ldt.limit = 256 * sizeof(uint64) - 1;
seg_ldt.present = 1;
seg_ldt.dpl = 0;
seg_ldt.s = 0;
@@ -338,7 +338,7 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
seg_ldt.db = 1;
seg_ldt.l = 0;
sregs.ldt = seg_ldt;
- uint64_t* ldt = (uint64_t*)(host_mem + sregs.ldt.base);
+ uint64* ldt = (uint64*)(host_mem + sregs.ldt.base);
struct kvm_segment seg_cs16;
seg_cs16.selector = SEL_CS16;
@@ -518,8 +518,8 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
setup_32bit_idt(&sregs, host_mem, guest_mem);
if (flags & KVM_SETUP_PAGING) {
- uint64_t pd_addr = guest_mem + ADDR_PD;
- uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
+ uint64 pd_addr = guest_mem + ADDR_PD;
+ uint64* pd = (uint64*)(host_mem + ADDR_PD);
// A single 4MB page to cover the memory region
NONFAILING(pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS);
sregs.cr3 = pd_addr;
@@ -566,8 +566,8 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
- uint64_t pd_addr = guest_mem + ADDR_PD;
- uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
+ uint64 pd_addr = guest_mem + ADDR_PD;
+ uint64* pd = (uint64*)(host_mem + ADDR_PD);
// A single 4MB page to cover the memory region
NONFAILING(pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS);
sregs.cr3 = pd_addr;
@@ -592,12 +592,12 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
- uint64_t pml4_addr = guest_mem + ADDR_PML4;
- uint64_t* pml4 = (uint64_t*)(host_mem + ADDR_PML4);
- uint64_t pdpt_addr = guest_mem + ADDR_PDP;
- uint64_t* pdpt = (uint64_t*)(host_mem + ADDR_PDP);
- uint64_t pd_addr = guest_mem + ADDR_PD;
- uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
+ uint64 pml4_addr = guest_mem + ADDR_PML4;
+ uint64* pml4 = (uint64*)(host_mem + ADDR_PML4);
+ uint64 pdpt_addr = guest_mem + ADDR_PDP;
+ uint64* pdpt = (uint64*)(host_mem + ADDR_PDP);
+ uint64 pd_addr = guest_mem + ADDR_PD;
+ uint64* pd = (uint64*)(host_mem + ADDR_PD);
NONFAILING(pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdpt_addr);
NONFAILING(pdpt[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_addr);
NONFAILING(pd[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | PDE64_PS);
@@ -607,10 +607,10 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
if (flags & KVM_SETUP_VM) {
sregs.cr0 |= CR0_NE;
- NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMXON_PTR)) = ADDR_VAR_VMXON);
- NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMCS_PTR)) = ADDR_VAR_VMCS);
+ NONFAILING(*((uint64*)(host_mem + ADDR_VAR_VMXON_PTR)) = ADDR_VAR_VMXON);
+ NONFAILING(*((uint64*)(host_mem + ADDR_VAR_VMCS_PTR)) = ADDR_VAR_VMCS);
NONFAILING(memcpy(host_mem + ADDR_VAR_VMEXIT_CODE, kvm_asm64_vm_exit, sizeof(kvm_asm64_vm_exit) - 1));
- NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMEXIT_PTR)) = ADDR_VAR_VMEXIT_CODE);
+ NONFAILING(*((uint64*)(host_mem + ADDR_VAR_VMEXIT_PTR)) = ADDR_VAR_VMEXIT_CODE);
text_prefix = kvm_asm64_init_vm;
text_prefix_size = sizeof(kvm_asm64_init_vm) - 1;
@@ -697,12 +697,12 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
// Replace 0xbadc0de in LJMP with offset of a next instruction.
NONFAILING(patch = memmem(host_text, text_prefix_size, "\xde\xc0\xad\x0b", 4));
if (patch)
- NONFAILING(*((uint32_t*)patch) = guest_mem + ADDR_TEXT + ((char*)patch - host_text) + 6);
- uint16_t magic = PREFIX_SIZE;
+ NONFAILING(*((uint32*)patch) = guest_mem + ADDR_TEXT + ((char*)patch - host_text) + 6);
+ uint16 magic = PREFIX_SIZE;
patch = 0;
NONFAILING(patch = memmem(host_text, text_prefix_size, &magic, sizeof(magic)));
if (patch)
- NONFAILING(*((uint16_t*)patch) = guest_mem + ADDR_TEXT + text_prefix_size);
+ NONFAILING(*((uint16*)patch) = guest_mem + ADDR_TEXT + text_prefix_size);
}
NONFAILING(memcpy((void*)(host_text + text_prefix_size), text, text_size));
NONFAILING(*(host_text + text_prefix_size + text_size) = 0xf4); // hlt
@@ -714,14 +714,14 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
NONFAILING(memcpy(host_mem + ADDR_VAR_SYSRET, "\x0f\x07\xf4", 3));
NONFAILING(memcpy(host_mem + ADDR_VAR_SYSEXIT, "\x0f\x35\xf4", 3));
- NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = 0);
- NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = 0);
+ NONFAILING(*(uint64*)(host_mem + ADDR_VAR_VMWRITE_FLD) = 0);
+ NONFAILING(*(uint64*)(host_mem + ADDR_VAR_VMWRITE_VAL) = 0);
if (opt_count > 2)
opt_count = 2;
for (i = 0; i < opt_count; i++) {
- uint64_t typ = 0;
- uint64_t val = 0;
+ uint64 typ = 0;
+ uint64 val = 0;
NONFAILING(typ = opt_array_ptr[i].typ);
NONFAILING(val = opt_array_ptr[i].val);
switch (typ % 9) {
@@ -766,8 +766,8 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
seg_ds64_cpl3.type = val & 0xf;
break;
case 8:
- NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = (val & 0xffff));
- NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = (val >> 16));
+ NONFAILING(*(uint64*)(host_mem + ADDR_VAR_VMWRITE_FLD) = (val & 0xffff));
+ NONFAILING(*(uint64*)(host_mem + ADDR_VAR_VMWRITE_VAL) = (val >> 16));
break;
default:
fail("bad kvm setup opt");
diff --git a/executor/common_kvm_arm64.h b/executor/common_kvm_arm64.h
index b3dadc87c..868c2fe65 100644
--- a/executor/common_kvm_arm64.h
+++ b/executor/common_kvm_arm64.h
@@ -12,8 +12,8 @@ struct kvm_text {
};
struct kvm_opt {
- uint64_t typ;
- uint64_t val;
+ uint64 typ;
+ uint64 val;
};
// syz_kvm_setup_cpu(fd fd_kvmvm, cpufd fd_kvmcpu, usermem vma[24], text ptr[in, array[kvm_text, 1]], ntext len[text], flags flags[kvm_setup_flags], opts ptr[in, array[kvm_setup_opt, 0:2]], nopt len[opts])
@@ -45,13 +45,13 @@ static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2, uin
(void)text_type;
(void)opt_array_ptr;
- uint32_t features = 0;
+ uint32 features = 0;
if (opt_count > 1)
opt_count = 1;
uintptr_t i;
for (i = 0; i < opt_count; i++) {
- uint64_t typ = 0;
- uint64_t val = 0;
+ uint64 typ = 0;
+ uint64 val = 0;
NONFAILING(typ = opt_array_ptr[i].typ);
NONFAILING(val = opt_array_ptr[i].val);
switch (typ) {
diff --git a/executor/common_linux.h b/executor/common_linux.h
index 8919e6b5b..66ba666a7 100644
--- a/executor/common_linux.h
+++ b/executor/common_linux.h
@@ -187,18 +187,18 @@ static void install_segv_handler()
#endif
#if defined(SYZ_EXECUTOR) || (defined(SYZ_REPEAT) && defined(SYZ_WAIT_REPEAT))
-static uint64_t current_time_ms()
+static uint64 current_time_ms()
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fail("clock_gettime failed");
- return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+ return (uint64)ts.tv_sec * 1000 + (uint64)ts.tv_nsec / 1000000;
}
#endif
#if defined(SYZ_EXECUTOR)
-static void sleep_ms(uint64_t ms)
+static void sleep_ms(uint64 ms)
{
usleep(ms * 1000);
}
@@ -289,7 +289,7 @@ static int tun_frags_enabled;
#define IFF_NAPI_FRAGS 0x0020
#endif
-static void initialize_tun(uint64_t pid)
+static void initialize_tun(uint64 pid)
{
if (pid >= MAX_PIDS)
fail("tun: no more than %d executors", MAX_PIDS);
@@ -356,7 +356,7 @@ static void initialize_tun(uint64_t pid)
execute_command("ip link set dev %s up", iface);
}
-static void setup_tun(uint64_t pid, bool enable_tun)
+static void setup_tun(uint64 pid, bool enable_tun)
{
if (enable_tun)
initialize_tun(pid);
@@ -399,9 +399,9 @@ static void debug_dump_data(const char* data, int length)
#if defined(SYZ_EXECUTOR) || (defined(__NR_syz_emit_ethernet) && defined(SYZ_TUN_ENABLE))
#define MAX_FRAGS 4
struct vnet_fragmentation {
- uint32_t full;
- uint32_t count;
- uint32_t frags[MAX_FRAGS];
+ uint32 full;
+ uint32 count;
+ uint32 frags[MAX_FRAGS];
};
static uintptr_t syz_emit_ethernet(uintptr_t a0, uintptr_t a1, uintptr_t a2)
@@ -415,26 +415,26 @@ static uintptr_t syz_emit_ethernet(uintptr_t a0, uintptr_t a1, uintptr_t a2)
if (tunfd < 0)
return (uintptr_t)-1;
- uint32_t length = a0;
+ uint32 length = a0;
char* data = (char*)a1;
debug_dump_data(data, length);
struct vnet_fragmentation* frags = (struct vnet_fragmentation*)a2;
struct iovec vecs[MAX_FRAGS + 1];
- uint32_t nfrags = 0;
+ uint32 nfrags = 0;
if (!tun_frags_enabled || frags == NULL) {
vecs[nfrags].iov_base = data;
vecs[nfrags].iov_len = length;
nfrags++;
} else {
bool full = true;
- uint32_t i, count = 0;
+ uint32 i, count = 0;
NONFAILING(full = frags->full);
NONFAILING(count = frags->count);
if (count > MAX_FRAGS)
count = MAX_FRAGS;
for (i = 0; i < count && length != 0; i++) {
- uint32_t size = 0;
+ uint32 size = 0;
NONFAILING(size = frags->frags[i]);
if (size > length)
size = length;
@@ -482,8 +482,8 @@ struct ipv6hdr {
#endif
struct tcp_resources {
- int32_t seq;
- int32_t ack;
+ uint32 seq;
+ uint32 ack;
};
static uintptr_t syz_extract_tcp_res(uintptr_t a0, uintptr_t a1, uintptr_t a2)
@@ -528,8 +528,8 @@ static uintptr_t syz_extract_tcp_res(uintptr_t a0, uintptr_t a1, uintptr_t a2)
}
struct tcp_resources* res = (struct tcp_resources*)a0;
- NONFAILING(res->seq = htonl((ntohl(tcphdr->seq) + (uint32_t)a1)));
- NONFAILING(res->ack = htonl((ntohl(tcphdr->ack_seq) + (uint32_t)a2)));
+ NONFAILING(res->seq = htonl((ntohl(tcphdr->seq) + (uint32)a1)));
+ NONFAILING(res->ack = htonl((ntohl(tcphdr->ack_seq) + (uint32)a2)));
debug("extracted seq: %08x\n", res->seq);
debug("extracted ack: %08x\n", res->ack);
@@ -545,7 +545,7 @@ static uintptr_t syz_open_dev(uintptr_t a0, uintptr_t a1, uintptr_t a2)
// syz_open_dev$char(dev const[0xc], major intptr, minor intptr) fd
// syz_open_dev$block(dev const[0xb], major intptr, minor intptr) fd
char buf[128];
- sprintf(buf, "/dev/%s/%d:%d", a0 == 0xc ? "char" : "block", (uint8_t)a1, (uint8_t)a2);
+ sprintf(buf, "/dev/%s/%d:%d", a0 == 0xc ? "char" : "block", (uint8)a1, (uint8)a2);
return open(buf, O_RDWR, 0);
} else {
// syz_open_dev(dev strconst, id intptr, flags flags[open_flags]) fd
@@ -600,12 +600,12 @@ static uintptr_t syz_open_pts(uintptr_t a0, uintptr_t a1)
static uintptr_t syz_fuse_mount(uintptr_t a0, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5)
{
// syz_fuse_mount(target filename, mode flags[fuse_mode], uid uid, gid gid, maxread intptr, flags flags[mount_flags]) fd[fuse]
- uint64_t target = a0;
- uint64_t mode = a1;
- uint64_t uid = a2;
- uint64_t gid = a3;
- uint64_t maxread = a4;
- uint64_t flags = a5;
+ uint64 target = a0;
+ uint64 mode = a1;
+ uint64 uid = a2;
+ uint64 gid = a3;
+ uint64 maxread = a4;
+ uint64 flags = a5;
int fd = open("/dev/fuse", O_RDWR);
if (fd == -1)
@@ -628,14 +628,14 @@ static uintptr_t syz_fuse_mount(uintptr_t a0, uintptr_t a1, uintptr_t a2, uintpt
static uintptr_t syz_fuseblk_mount(uintptr_t a0, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7)
{
// syz_fuseblk_mount(target filename, blkdev filename, mode flags[fuse_mode], uid uid, gid gid, maxread intptr, blksize intptr, flags flags[mount_flags]) fd[fuse]
- uint64_t target = a0;
- uint64_t blkdev = a1;
- uint64_t mode = a2;
- uint64_t uid = a3;
- uint64_t gid = a4;
- uint64_t maxread = a5;
- uint64_t blksize = a6;
- uint64_t flags = a7;
+ uint64 target = a0;
+ uint64 blkdev = a1;
+ uint64 mode = a2;
+ uint64 uid = a3;
+ uint64 gid = a4;
+ uint64 maxread = a5;
+ uint64 blksize = a6;
+ uint64 flags = a7;
int fd = open("/dev/fuse", O_RDWR);
if (fd == -1)
@@ -1018,7 +1018,7 @@ void loop()
doexit(0);
}
int status = 0;
- uint64_t start = current_time_ms();
+ uint64 start = current_time_ms();
for (;;) {
int res = waitpid(-1, &status, __WALL | WNOHANG);
if (res == pid)
diff --git a/executor/common_windows.h b/executor/common_windows.h
index 0edc473d7..d52092017 100644
--- a/executor/common_windows.h
+++ b/executor/common_windows.h
@@ -33,14 +33,14 @@ static void install_segv_handler()
#endif
#if defined(SYZ_EXECUTOR) || (defined(SYZ_REPEAT) && defined(SYZ_WAIT_REPEAT))
-static uint64_t current_time_ms()
+static uint64 current_time_ms()
{
return GetTickCount64();
}
#endif
#if defined(SYZ_EXECUTOR)
-static void sleep_ms(uint64_t ms)
+static void sleep_ms(uint64 ms)
{
Sleep(ms);
}
diff --git a/executor/executor.h b/executor/executor.h
index 6fb32a76e..2c8b36b8d 100644
--- a/executor/executor.h
+++ b/executor/executor.h
@@ -31,16 +31,16 @@ const int kMaxArgs = 9;
const int kMaxThreads = 16;
const int kMaxCommands = 1000;
-const uint64_t instr_eof = -1;
-const uint64_t instr_copyin = -2;
-const uint64_t instr_copyout = -3;
+const uint64 instr_eof = -1;
+const uint64 instr_copyin = -2;
+const uint64 instr_copyout = -3;
-const uint64_t arg_const = 0;
-const uint64_t arg_result = 1;
-const uint64_t arg_data = 2;
-const uint64_t arg_csum = 3;
+const uint64 arg_const = 0;
+const uint64 arg_result = 1;
+const uint64 arg_data = 2;
+const uint64 arg_csum = 3;
-const uint64_t no_copyout = -1;
+const uint64 no_copyout = -1;
enum sandbox_type {
sandbox_none,
@@ -70,7 +70,7 @@ int flag_fault_nth;
int flag_pid;
int running;
-uint32_t completed;
+uint32 completed;
bool collide;
ALIGNED(64 << 10)
@@ -79,37 +79,37 @@ char input_data[kMaxInput];
// We use the default value instead of results of failed syscalls.
// -1 is an invalid fd and an invalid address and deterministic,
// so good enough for our purposes.
-const uint64_t default_value = -1;
+const uint64 default_value = -1;
// Checksum kinds.
-const uint64_t arg_csum_inet = 0;
+const uint64 arg_csum_inet = 0;
// Checksum chunk kinds.
-const uint64_t arg_csum_chunk_data = 0;
-const uint64_t arg_csum_chunk_const = 1;
+const uint64 arg_csum_chunk_data = 0;
+const uint64 arg_csum_chunk_const = 1;
struct thread_t {
bool created;
int id;
osthread_t th;
// TODO(dvyukov): this assumes 64-bit kernel. This must be "kernel long" somehow.
- uint64_t* cover_data;
+ uint64* cover_data;
// Pointer to the size of coverage (stored as first word of memory).
- uint64_t* cover_size_ptr;
- uint64_t cover_buffer[1]; // fallback coverage buffer
+ uint64* cover_size_ptr;
+ uint64 cover_buffer[1]; // fallback coverage buffer
event_t ready;
event_t done;
- uint64_t* copyout_pos;
- uint64_t copyout_index;
+ uint64* copyout_pos;
+ uint64 copyout_index;
bool handled;
int call_index;
int call_num;
int num_args;
long args[kMaxArgs];
long res;
- uint32_t reserrno;
- uint64_t cover_size;
+ uint32 reserrno;
+ uint64 cover_size;
bool fault_injected;
int cover_fd;
};
@@ -118,38 +118,38 @@ thread_t threads[kMaxThreads];
struct res_t {
bool executed;
- uint64_t val;
+ uint64 val;
};
res_t results[kMaxCommands];
-const uint64_t kInMagic = 0xbadc0ffeebadface;
-const uint32_t kOutMagic = 0xbadf00d;
+const uint64 kInMagic = 0xbadc0ffeebadface;
+const uint32 kOutMagic = 0xbadf00d;
struct handshake_req {
- uint64_t magic;
- uint64_t flags; // env flags
- uint64_t pid;
+ uint64 magic;
+ uint64 flags; // env flags
+ uint64 pid;
};
struct handshake_reply {
- uint32_t magic;
+ uint32 magic;
};
struct execute_req {
- uint64_t magic;
- uint64_t env_flags;
- uint64_t exec_flags;
- uint64_t pid;
- uint64_t fault_call;
- uint64_t fault_nth;
- uint64_t prog_size;
+ uint64 magic;
+ uint64 env_flags;
+ uint64 exec_flags;
+ uint64 pid;
+ uint64 fault_call;
+ uint64 fault_nth;
+ uint64 prog_size;
};
struct execute_reply {
- uint32_t magic;
- uint32_t done;
- uint32_t status;
+ uint32 magic;
+ uint32 done;
+ uint32 status;
};
enum {
@@ -162,10 +162,10 @@ enum {
};
struct kcov_comparison_t {
- uint64_t type;
- uint64_t arg1;
- uint64_t arg2;
- uint64_t pc;
+ uint64 type;
+ uint64 arg1;
+ uint64 arg2;
+ uint64 pc;
bool ignore() const;
void write();
@@ -174,25 +174,25 @@ struct kcov_comparison_t {
};
long execute_syscall(call_t* c, long a0, long a1, long a2, long a3, long a4, long a5, long a6, long a7, long a8);
-thread_t* schedule_call(int call_index, int call_num, uint64_t copyout_index, uint64_t num_args, uint64_t* args, uint64_t* pos);
+thread_t* schedule_call(int call_index, int call_num, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos);
void handle_completion(thread_t* th);
void execute_call(thread_t* th);
void thread_create(thread_t* th, int id);
void* worker_thread(void* arg);
-uint32_t* write_output(uint32_t v);
-void write_completed(uint32_t completed);
-uint64_t read_input(uint64_t** input_posp, bool peek = false);
-uint64_t read_arg(uint64_t** input_posp);
-uint64_t read_const_arg(uint64_t** input_posp, uint64_t* size_p, uint64_t* bf_off_p, uint64_t* bf_len_p);
-uint64_t read_result(uint64_t** input_posp);
-void copyin(char* addr, uint64_t val, uint64_t size, uint64_t bf_off, uint64_t bf_len);
-uint64_t copyout(char* addr, uint64_t size);
+uint32* write_output(uint32 v);
+void write_completed(uint32 completed);
+uint64 read_input(uint64** input_posp, bool peek = false);
+uint64 read_arg(uint64** input_posp);
+uint64 read_const_arg(uint64** input_posp, uint64* size_p, uint64* bf_off_p, uint64* bf_len_p);
+uint64 read_result(uint64** input_posp);
+void copyin(char* addr, uint64 val, uint64 size, uint64 bf_off, uint64 bf_len);
+uint64 copyout(char* addr, uint64 size);
void cover_open();
void cover_enable(thread_t* th);
void cover_reset(thread_t* th);
-uint64_t read_cover_size(thread_t* th);
-static uint32_t hash(uint32_t a);
-static bool dedup(uint32_t sig);
+uint64 read_cover_size(thread_t* th);
+static uint32 hash(uint32 a);
+static bool dedup(uint32 sig);
void setup_control_pipes()
{
@@ -206,7 +206,7 @@ void setup_control_pipes()
fail("close(0) failed");
}
-void parse_env_flags(uint64_t flags)
+void parse_env_flags(uint64 flags)
{
flag_debug = flags & (1 << 0);
flag_cover = flags & (1 << 1);
@@ -269,7 +269,7 @@ void receive_execute(bool need_prog)
fail("need_prog: no program");
return;
}
- uint64_t pos = 0;
+ uint64 pos = 0;
for (;;) {
ssize_t rv = read(kInPipeFd, input_data + pos, sizeof(input_data) - pos);
if (rv < 0)
@@ -296,7 +296,7 @@ void reply_execute(int status)
void execute_one()
{
retry:
- uint64_t* input_pos = (uint64_t*)input_data;
+ uint64* input_pos = (uint64*)input_data;
write_output(0); // Number of executed syscalls (updated later).
if (!collide && !flag_threaded)
@@ -304,39 +304,39 @@ retry:
int call_index = 0;
for (;;) {
- uint64_t call_num = read_input(&input_pos);
+ uint64 call_num = read_input(&input_pos);
if (call_num == instr_eof)
break;
if (call_num == instr_copyin) {
char* addr = (char*)read_input(&input_pos);
- uint64_t typ = read_input(&input_pos);
+ uint64 typ = read_input(&input_pos);
debug("copyin to %p\n", addr);
switch (typ) {
case arg_const: {
- uint64_t size, bf_off, bf_len;
- uint64_t arg = read_const_arg(&input_pos, &size, &bf_off, &bf_len);
+ uint64 size, bf_off, bf_len;
+ uint64 arg = read_const_arg(&input_pos, &size, &bf_off, &bf_len);
copyin(addr, arg, size, bf_off, bf_len);
break;
}
case arg_result: {
- uint64_t size = read_input(&input_pos);
- uint64_t val = read_result(&input_pos);
+ uint64 size = read_input(&input_pos);
+ uint64 val = read_result(&input_pos);
copyin(addr, val, size, 0, 0);
break;
}
case arg_data: {
- uint64_t size = read_input(&input_pos);
+ uint64 size = read_input(&input_pos);
NONFAILING(memcpy(addr, input_pos, size));
// Read out the data.
- for (uint64_t i = 0; i < (size + 7) / 8; i++)
+ for (uint64 i = 0; i < (size + 7) / 8; i++)
read_input(&input_pos);
break;
}
case arg_csum: {
debug("checksum found at %p\n", addr);
- uint64_t size = read_input(&input_pos);
+ uint64 size = read_input(&input_pos);
char* csum_addr = addr;
- uint64_t csum_kind = read_input(&input_pos);
+ uint64 csum_kind = read_input(&input_pos);
switch (csum_kind) {
case arg_csum_inet: {
if (size != 2) {
@@ -345,16 +345,16 @@ retry:
debug("calculating checksum for %p\n", csum_addr);
struct csum_inet csum;
csum_inet_init(&csum);
- uint64_t chunks_num = read_input(&input_pos);
- uint64_t chunk;
+ uint64 chunks_num = read_input(&input_pos);
+ uint64 chunk;
for (chunk = 0; chunk < chunks_num; chunk++) {
- uint64_t chunk_kind = read_input(&input_pos);
- uint64_t chunk_value = read_input(&input_pos);
- uint64_t chunk_size = read_input(&input_pos);
+ uint64 chunk_kind = read_input(&input_pos);
+ uint64 chunk_value = read_input(&input_pos);
+ uint64 chunk_size = read_input(&input_pos);
switch (chunk_kind) {
case arg_csum_chunk_data:
debug("#%lld: data chunk, addr: %llx, size: %llu\n", chunk, chunk_value, chunk_size);
- NONFAILING(csum_inet_update(&csum, (const uint8_t*)chunk_value, chunk_size));
+ NONFAILING(csum_inet_update(&csum, (const uint8*)chunk_value, chunk_size));
break;
case arg_csum_chunk_const:
if (chunk_size != 2 && chunk_size != 4 && chunk_size != 8) {
@@ -362,13 +362,13 @@ retry:
}
// Here we assume that const values come to us big endian.
debug("#%lld: const chunk, value: %llx, size: %llu\n", chunk, chunk_value, chunk_size);
- csum_inet_update(&csum, (const uint8_t*)&chunk_value, chunk_size);
+ csum_inet_update(&csum, (const uint8*)&chunk_value, chunk_size);
break;
default:
fail("bad checksum chunk kind %llu", chunk_kind);
}
}
- int16_t csum_value = csum_inet_digest(&csum);
+ uint16 csum_value = csum_inet_digest(&csum);
debug("writing inet checksum %hx to %p\n", csum_value, csum_addr);
copyin(csum_addr, csum_value, 2, 0, 0);
break;
@@ -394,14 +394,14 @@ retry:
// Normal syscall.
if (call_num >= syscall_count)
fail("invalid command number %llu", call_num);
- uint64_t copyout_index = read_input(&input_pos);
- uint64_t num_args = read_input(&input_pos);
+ uint64 copyout_index = read_input(&input_pos);
+ uint64 num_args = read_input(&input_pos);
if (num_args > kMaxArgs)
fail("command has bad number of arguments %llu", num_args);
- uint64_t args[kMaxArgs] = {};
- for (uint64_t i = 0; i < num_args; i++)
+ uint64 args[kMaxArgs] = {};
+ for (uint64 i = 0; i < num_args; i++)
args[i] = read_arg(&input_pos);
- for (uint64_t i = num_args; i < 6; i++)
+ for (uint64 i = num_args; i < 6; i++)
args[i] = 0;
thread_t* th = schedule_call(call_index++, call_num, copyout_index, num_args, args, input_pos);
@@ -412,7 +412,7 @@ retry:
// Wait for call completion.
// Note: sys knows about this 20ms timeout when it generates
// timespec/timeval values.
- const uint64_t timeout_ms = flag_debug ? 500 : 20;
+ const uint64 timeout_ms = flag_debug ? 500 : 20;
if (event_timedwait(&th->done, timeout_ms))
handle_completion(th);
// Check if any of previous calls have completed.
@@ -445,7 +445,7 @@ retry:
}
}
-thread_t* schedule_call(int call_index, int call_num, uint64_t copyout_index, uint64_t num_args, uint64_t* args, uint64_t* pos)
+thread_t* schedule_call(int call_index, int call_num, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos)
{
// Find a spare thread to execute the call.
int i;
@@ -494,13 +494,13 @@ void handle_completion(thread_t* th)
results[th->copyout_index].val = th->res;
}
for (bool done = false; !done;) {
- uint64_t instr = read_input(&th->copyout_pos);
+ uint64 instr = read_input(&th->copyout_pos);
switch (instr) {
case instr_copyout: {
- uint64_t index = read_input(&th->copyout_pos);
+ uint64 index = read_input(&th->copyout_pos);
char* addr = (char*)read_input(&th->copyout_pos);
- uint64_t size = read_input(&th->copyout_pos);
- uint64_t val = copyout(addr, size);
+ uint64 size = read_input(&th->copyout_pos);
+ uint64 val = copyout(addr, size);
if (index >= kMaxCommands)
fail("result idx %lld overflows kMaxCommands", index);
results[index].executed = true;
@@ -517,24 +517,24 @@ void handle_completion(thread_t* th)
if (!collide) {
write_output(th->call_index);
write_output(th->call_num);
- uint32_t reserrno = th->res != -1 ? 0 : th->reserrno;
+ uint32 reserrno = th->res != -1 ? 0 : th->reserrno;
write_output(reserrno);
write_output(th->fault_injected);
- uint32_t* signal_count_pos = write_output(0); // filled in later
- uint32_t* cover_count_pos = write_output(0); // filled in later
- uint32_t* comps_count_pos = write_output(0); // filled in later
- uint32_t nsig = 0, cover_size = 0, comps_size = 0;
+ uint32* signal_count_pos = write_output(0); // filled in later
+ uint32* cover_count_pos = write_output(0); // filled in later
+ uint32* comps_count_pos = write_output(0); // filled in later
+ uint32 nsig = 0, cover_size = 0, comps_size = 0;
if (flag_collect_comps) {
// Collect only the comparisons
- uint32_t ncomps = th->cover_size;
+ uint32 ncomps = th->cover_size;
kcov_comparison_t* start = (kcov_comparison_t*)th->cover_data;
kcov_comparison_t* end = start + ncomps;
- if ((uint64_t*)end >= th->cover_data + kCoverSize)
+ if ((uint64*)end >= th->cover_data + kCoverSize)
fail("too many comparisons %u", ncomps);
std::sort(start, end);
ncomps = std::unique(start, end) - start;
- for (uint32_t i = 0; i < ncomps; ++i) {
+ for (uint32 i = 0; i < ncomps; ++i) {
if (start[i].ignore())
continue;
comps_size++;
@@ -544,10 +544,10 @@ void handle_completion(thread_t* th)
// Write out feedback signals.
// Currently it is code edges computed as xor of
// two subsequent basic block PCs.
- uint32_t prev = 0;
- for (uint32_t i = 0; i < th->cover_size; i++) {
- uint32_t pc = (uint32_t)th->cover_data[i];
- uint32_t sig = pc ^ prev;
+ uint32 prev = 0;
+ for (uint32 i = 0; i < th->cover_size; i++) {
+ uint32 pc = (uint32)th->cover_data[i];
+ uint32 sig = pc ^ prev;
prev = hash(pc);
if (dedup(sig))
continue;
@@ -558,15 +558,15 @@ void handle_completion(thread_t* th)
// Write out real coverage (basic block PCs).
cover_size = th->cover_size;
if (flag_dedup_cover) {
- uint64_t* start = (uint64_t*)th->cover_data;
- uint64_t* end = start + cover_size;
+ uint64* start = (uint64*)th->cover_data;
+ uint64* end = start + cover_size;
std::sort(start, end);
cover_size = std::unique(start, end) - start;
}
- // Truncate PCs to uint32_t assuming that they fit into 32-bits.
+ // Truncate PCs to uint32 assuming that they fit into 32-bits.
// True for x86_64 and arm64 without KASLR.
- for (uint32_t i = 0; i < cover_size; i++)
- write_output((uint32_t)th->cover_data[i]);
+ for (uint32 i = 0; i < cover_size; i++)
+ write_output((uint32)th->cover_data[i]);
}
}
// Write out real coverage (basic block PCs).
@@ -650,7 +650,7 @@ void execute_call(thread_t* th)
event_set(&th->done);
}
-static uint32_t hash(uint32_t a)
+static uint32 hash(uint32 a)
{
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
@@ -660,16 +660,16 @@ static uint32_t hash(uint32_t a)
return a;
}
-const uint32_t dedup_table_size = 8 << 10;
-uint32_t dedup_table[dedup_table_size];
+const uint32 dedup_table_size = 8 << 10;
+uint32 dedup_table[dedup_table_size];
// Poorman's best-effort hashmap-based deduplication.
// The hashmap is global which means that we deduplicate across different calls.
// This is OK because we are interested only in new signals.
-static bool dedup(uint32_t sig)
+static bool dedup(uint32 sig)
{
- for (uint32_t i = 0; i < 4; i++) {
- uint32_t pos = (sig + i) % dedup_table_size;
+ for (uint32 i = 0; i < 4; i++) {
+ uint32 pos = (sig + i) % dedup_table_size;
if (dedup_table[pos] == sig)
return true;
if (dedup_table[pos] == 0) {
@@ -681,41 +681,41 @@ static bool dedup(uint32_t sig)
return false;
}
-void copyin(char* addr, uint64_t val, uint64_t size, uint64_t bf_off, uint64_t bf_len)
+void copyin(char* addr, uint64 val, uint64 size, uint64 bf_off, uint64 bf_len)
{
NONFAILING(switch (size) {
case 1:
- STORE_BY_BITMASK(uint8_t, addr, val, bf_off, bf_len);
+ STORE_BY_BITMASK(uint8, addr, val, bf_off, bf_len);
break;
case 2:
- STORE_BY_BITMASK(uint16_t, addr, val, bf_off, bf_len);
+ STORE_BY_BITMASK(uint16, addr, val, bf_off, bf_len);
break;
case 4:
- STORE_BY_BITMASK(uint32_t, addr, val, bf_off, bf_len);
+ STORE_BY_BITMASK(uint32, addr, val, bf_off, bf_len);
break;
case 8:
- STORE_BY_BITMASK(uint64_t, addr, val, bf_off, bf_len);
+ STORE_BY_BITMASK(uint64, addr, val, bf_off, bf_len);
break;
default:
fail("copyin: bad argument size %llu", size);
});
}
-uint64_t copyout(char* addr, uint64_t size)
+uint64 copyout(char* addr, uint64 size)
{
- uint64_t res = default_value;
+ uint64 res = default_value;
NONFAILING(switch (size) {
case 1:
- res = *(uint8_t*)addr;
+ res = *(uint8*)addr;
break;
case 2:
- res = *(uint16_t*)addr;
+ res = *(uint16*)addr;
break;
case 4:
- res = *(uint32_t*)addr;
+ res = *(uint32*)addr;
break;
case 8:
- res = *(uint64_t*)addr;
+ res = *(uint64*)addr;
break;
default:
fail("copyout: bad argument size %llu", size);
@@ -723,12 +723,12 @@ uint64_t copyout(char* addr, uint64_t size)
return res;
}
-uint64_t read_arg(uint64_t** input_posp)
+uint64 read_arg(uint64** input_posp)
{
- uint64_t typ = read_input(input_posp);
+ uint64 typ = read_input(input_posp);
switch (typ) {
case arg_const: {
- uint64_t size, bf_off, bf_len;
+ uint64 size, bf_off, bf_len;
return read_const_arg(input_posp, &size, &bf_off, &bf_len);
}
case arg_result: {
@@ -740,15 +740,15 @@ uint64_t read_arg(uint64_t** input_posp)
}
}
-uint64_t read_const_arg(uint64_t** input_posp, uint64_t* size_p, uint64_t* bf_off_p, uint64_t* bf_len_p)
+uint64 read_const_arg(uint64** input_posp, uint64* size_p, uint64* bf_off_p, uint64* bf_len_p)
{
- uint64_t meta = read_input(input_posp);
- uint64_t val = read_input(input_posp);
+ uint64 meta = read_input(input_posp);
+ uint64 val = read_input(input_posp);
*size_p = meta & 0xff;
bool be = meta & (1 << 8);
*bf_off_p = (meta >> 16) & 0xff;
*bf_len_p = (meta >> 24) & 0xff;
- uint64_t pid_stride = meta >> 32;
+ uint64 pid_stride = meta >> 32;
val += pid_stride * flag_pid;
if (be) {
switch (*size_p) {
@@ -768,14 +768,14 @@ uint64_t read_const_arg(uint64_t** input_posp, uint64_t* size_p, uint64_t* bf_of
return val;
}
-uint64_t read_result(uint64_t** input_posp)
+uint64 read_result(uint64** input_posp)
{
- uint64_t idx = read_input(input_posp);
- uint64_t op_div = read_input(input_posp);
- uint64_t op_add = read_input(input_posp);
+ uint64 idx = read_input(input_posp);
+ uint64 op_div = read_input(input_posp);
+ uint64 op_add = read_input(input_posp);
if (idx >= kMaxCommands)
fail("command refers to bad result %lld", idx);
- uint64_t arg = default_value;
+ uint64 arg = default_value;
if (results[idx].executed) {
arg = results[idx].val;
if (op_div != 0)
@@ -785,9 +785,9 @@ uint64_t read_result(uint64_t** input_posp)
return arg;
}
-uint64_t read_input(uint64_t** input_posp, bool peek)
+uint64 read_input(uint64** input_posp, bool peek)
{
- uint64_t* input_pos = *input_posp;
+ uint64* input_pos = *input_posp;
if ((char*)input_pos >= input_data + kMaxInput)
fail("input command overflows input");
if (!peek)
@@ -798,39 +798,39 @@ uint64_t read_input(uint64_t** input_posp, bool peek)
void kcov_comparison_t::write()
{
// Write order: type arg1 arg2 pc.
- write_output((uint32_t)type);
+ write_output((uint32)type);
// KCOV converts all arguments of size x first to uintx_t and then to
- // uint64_t. We want to properly extend signed values, e.g we want
- // int8_t c = 0xfe to be represented as 0xfffffffffffffffe.
- // Note that uint8_t c = 0xfe will be represented the same way.
+ // uint64. We want to properly extend signed values, e.g we want
+ // int8 c = 0xfe to be represented as 0xfffffffffffffffe.
+ // Note that uint8 c = 0xfe will be represented the same way.
// This is ok because during hints processing we will anyways try
// the value 0x00000000000000fe.
switch (type & KCOV_CMP_SIZE_MASK) {
case KCOV_CMP_SIZE1:
- arg1 = (uint64_t)(int64_t)(int8_t)arg1;
- arg2 = (uint64_t)(int64_t)(int8_t)arg2;
+ arg1 = (uint64)(long long)(signed char)arg1;
+ arg2 = (uint64)(long long)(signed char)arg2;
break;
case KCOV_CMP_SIZE2:
- arg1 = (uint64_t)(int64_t)(int16_t)arg1;
- arg2 = (uint64_t)(int64_t)(int16_t)arg2;
+ arg1 = (uint64)(long long)(short)arg1;
+ arg2 = (uint64)(long long)(short)arg2;
break;
case KCOV_CMP_SIZE4:
- arg1 = (uint64_t)(int64_t)(int32_t)arg1;
- arg2 = (uint64_t)(int64_t)(int32_t)arg2;
+ arg1 = (uint64)(long long)(int)arg1;
+ arg2 = (uint64)(long long)(int)arg2;
break;
}
bool is_size_8 = (type & KCOV_CMP_SIZE_MASK) == KCOV_CMP_SIZE8;
if (!is_size_8) {
- write_output((uint32_t)arg1);
- write_output((uint32_t)arg2);
+ write_output((uint32)arg1);
+ write_output((uint32)arg2);
return;
}
// If we have 64 bits arguments then write them in Little-endian.
- write_output((uint32_t)(arg1 & 0xFFFFFFFF));
- write_output((uint32_t)(arg1 >> 32));
- write_output((uint32_t)(arg2 & 0xFFFFFFFF));
- write_output((uint32_t)(arg2 >> 32));
+ write_output((uint32)(arg1 & 0xFFFFFFFF));
+ write_output((uint32)(arg1 >> 32));
+ write_output((uint32)(arg2 & 0xFFFFFFFF));
+ write_output((uint32)(arg2 >> 32));
}
bool kcov_comparison_t::operator==(const struct kcov_comparison_t& other) const
diff --git a/executor/executor_akaros.cc b/executor/executor_akaros.cc
index 8cb87d3d7..90b589e77 100644
--- a/executor/executor_akaros.cc
+++ b/executor/executor_akaros.cc
@@ -12,7 +12,7 @@
#include "syscalls_akaros.h"
-uint32_t output;
+uint32 output;
int main(int argc, char** argv)
{
@@ -43,13 +43,13 @@ int main(int argc, char** argv)
doexit(0);
}
int status = 0;
- uint64_t start = current_time_ms();
+ uint64 start = current_time_ms();
for (;;) {
int res = waitpid(pid, &status, WNOHANG);
if (res == pid)
break;
sleep_ms(10);
- uint64_t now = current_time_ms();
+ uint64 now = current_time_ms();
if (now - start < 3 * 1000)
continue;
kill(pid, SIGKILL);
@@ -85,17 +85,17 @@ void cover_reset(thread_t* th)
{
}
-uint64_t read_cover_size(thread_t* th)
+uint64 read_cover_size(thread_t* th)
{
return 0;
}
-uint32_t* write_output(uint32_t v)
+uint32* write_output(uint32 v)
{
return &output;
}
-void write_completed(uint32_t completed)
+void write_completed(uint32 completed)
{
}
diff --git a/executor/executor_bsd.cc b/executor/executor_bsd.cc
index da5e10418..2500a2a12 100644
--- a/executor/executor_bsd.cc
+++ b/executor/executor_bsd.cc
@@ -30,8 +30,8 @@
const int kInFd = 3;
const int kOutFd = 4;
-uint32_t* output_data;
-uint32_t* output_pos;
+uint32* output_data;
+uint32* output_pos;
int main(int argc, char** argv)
{
@@ -47,7 +47,7 @@ int main(int argc, char** argv)
// But fuzzer constantly invents new ways of how to currupt the region,
// so we map the region at a (hopefully) hard to guess address surrounded by unmapped pages.
void* const kOutputDataAddr = (void*)0x1ddbc20000;
- output_data = (uint32_t*)mmap(kOutputDataAddr, kMaxOutput, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, kOutFd, 0);
+ output_data = (uint32*)mmap(kOutputDataAddr, kMaxOutput, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, kOutFd, 0);
if (output_data != kOutputDataAddr)
fail("mmap of output file failed");
// Prevent random programs to mess with these fds.
@@ -97,16 +97,16 @@ int main(int argc, char** argv)
doexit(0);
}
int status = 0;
- uint64_t start = current_time_ms();
- uint64_t last_executed = start;
- uint32_t executed_calls = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint64 start = current_time_ms();
+ uint64 last_executed = start;
+ uint32 executed_calls = __atomic_load_n(output_data, __ATOMIC_RELAXED);
for (;;) {
int res = waitpid(pid, &status, WNOHANG);
if (res == pid)
break;
sleep_ms(1);
- uint64_t now = current_time_ms();
- uint32_t now_executed = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint64 now = current_time_ms();
+ uint32 now_executed = __atomic_load_n(output_data, __ATOMIC_RELAXED);
if (executed_calls != now_executed) {
executed_calls = now_executed;
last_executed = now;
@@ -154,7 +154,7 @@ void cover_reset(thread_t* th)
{
}
-uint64_t read_cover_size(thread_t* th)
+uint64 read_cover_size(thread_t* th)
{
if (!flag_cover)
return 0;
@@ -165,7 +165,7 @@ uint64_t read_cover_size(thread_t* th)
return 1;
}
-uint32_t* write_output(uint32_t v)
+uint32* write_output(uint32 v)
{
if (collide)
return 0;
@@ -175,7 +175,7 @@ uint32_t* write_output(uint32_t v)
return output_pos++;
}
-void write_completed(uint32_t completed)
+void write_completed(uint32 completed)
{
__atomic_store_n(output_data, completed, __ATOMIC_RELEASE);
}
diff --git a/executor/executor_fuchsia.cc b/executor/executor_fuchsia.cc
index 05ded7857..bd66762a7 100644
--- a/executor/executor_fuchsia.cc
+++ b/executor/executor_fuchsia.cc
@@ -12,7 +12,7 @@
#include "syscalls_fuchsia.h"
-uint32_t output;
+uint32 output;
int main(int argc, char** argv)
{
@@ -48,17 +48,17 @@ void cover_reset(thread_t* th)
{
}
-uint64_t read_cover_size(thread_t* th)
+uint64 read_cover_size(thread_t* th)
{
return 0;
}
-uint32_t* write_output(uint32_t v)
+uint32* write_output(uint32 v)
{
return &output;
}
-void write_completed(uint32_t completed)
+void write_completed(uint32 completed)
{
}
diff --git a/executor/executor_linux.cc b/executor/executor_linux.cc
index b71290825..9511f1d57 100644
--- a/executor/executor_linux.cc
+++ b/executor/executor_linux.cc
@@ -5,6 +5,7 @@
#include <fcntl.h>
#include <limits.h>
+#include <pthread.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
@@ -38,8 +39,8 @@ const int kOutFd = 4;
// The address chosen must also work on 32-bit kernels with 2GB user address space.
void* const kOutputDataAddr = (void*)0x1b9bc20000ull;
-uint32_t* output_data;
-uint32_t* output_pos;
+uint32* output_data;
+uint32* output_pos;
int main(int argc, char** argv)
{
@@ -55,7 +56,7 @@ int main(int argc, char** argv)
// If it is corrupted ipc package will fail to parse its contents and panic.
// But fuzzer constantly invents new ways of how to currupt the region,
// so we map the region at a (hopefully) hard to guess address surrounded by unmapped pages.
- output_data = (uint32_t*)mmap(kOutputDataAddr, kMaxOutput, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, kOutFd, 0);
+ output_data = (uint32*)mmap(kOutputDataAddr, kMaxOutput, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, kOutFd, 0);
if (output_data != kOutputDataAddr)
fail("mmap of output file failed");
// Prevent random programs to mess with these fds.
@@ -158,9 +159,9 @@ void loop()
// SIGCHLD should also unblock the usleep below, so the spin loop
// should be as efficient as sigtimedwait.
int status = 0;
- uint64_t start = current_time_ms();
- uint64_t last_executed = start;
- uint32_t executed_calls = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint64 start = current_time_ms();
+ uint64 last_executed = start;
+ uint32 executed_calls = __atomic_load_n(output_data, __ATOMIC_RELAXED);
for (;;) {
int res = waitpid(-1, &status, __WALL | WNOHANG);
int errno0 = errno;
@@ -179,8 +180,8 @@ void loop()
// then the main thread hangs when it wants to page in a page.
// Below we check if the test process still executes syscalls
// and kill it after 200ms of inactivity.
- uint64_t now = current_time_ms();
- uint32_t now_executed = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint64 now = current_time_ms();
+ uint32 now_executed = __atomic_load_n(output_data, __ATOMIC_RELAXED);
if (executed_calls != now_executed) {
executed_calls = now_executed;
last_executed = now;
@@ -228,8 +229,8 @@ void cover_open()
if (ioctl(th->cover_fd, KCOV_INIT_TRACE, kCoverSize))
fail("cover init trace write failed");
size_t mmap_alloc_size = kCoverSize * sizeof(th->cover_data[0]);
- uint64_t* mmap_ptr = (uint64_t*)mmap(NULL, mmap_alloc_size,
- PROT_READ | PROT_WRITE, MAP_SHARED, th->cover_fd, 0);
+ uint64* mmap_ptr = (uint64*)mmap(NULL, mmap_alloc_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, th->cover_fd, 0);
if (mmap_ptr == MAP_FAILED)
fail("cover mmap failed");
th->cover_size_ptr = mmap_ptr;
@@ -258,18 +259,18 @@ void cover_reset(thread_t* th)
__atomic_store_n(th->cover_size_ptr, 0, __ATOMIC_RELAXED);
}
-uint64_t read_cover_size(thread_t* th)
+uint64 read_cover_size(thread_t* th)
{
if (!flag_cover)
return 0;
- uint64_t n = __atomic_load_n(th->cover_size_ptr, __ATOMIC_RELAXED);
+ uint64 n = __atomic_load_n(th->cover_size_ptr, __ATOMIC_RELAXED);
debug("#%d: read cover size = %llu\n", th->id, n);
if (n >= kCoverSize)
fail("#%d: too much cover %llu", th->id, n);
return n;
}
-uint32_t* write_output(uint32_t v)
+uint32* write_output(uint32 v)
{
if (collide)
return 0;
@@ -279,7 +280,7 @@ uint32_t* write_output(uint32_t v)
return output_pos++;
}
-void write_completed(uint32_t completed)
+void write_completed(uint32 completed)
{
__atomic_store_n(output_data, completed, __ATOMIC_RELEASE);
}
@@ -293,8 +294,8 @@ bool kcov_comparison_t::ignore() const
// This can be a pointer (assuming 64-bit kernel).
// First of all, we want avert fuzzer from our output region.
// Without this fuzzer manages to discover and corrupt it.
- uint64_t out_start = (uint64_t)kOutputDataAddr;
- uint64_t out_end = out_start + kMaxOutput;
+ uint64 out_start = (uint64)kOutputDataAddr;
+ uint64 out_end = out_start + kMaxOutput;
if (arg1 >= out_start && arg1 <= out_end)
return true;
if (arg2 >= out_start && arg2 <= out_end)
@@ -303,8 +304,8 @@ bool kcov_comparison_t::ignore() const
// Filter out kernel physical memory addresses.
// These are internal kernel comparisons and should not be interesting.
// The range covers first 1TB of physical mapping.
- uint64_t kmem_start = (uint64_t)0xffff880000000000ull;
- uint64_t kmem_end = (uint64_t)0xffff890000000000ull;
+ uint64 kmem_start = (uint64)0xffff880000000000ull;
+ uint64 kmem_end = (uint64)0xffff890000000000ull;
bool kptr1 = arg1 >= kmem_start && arg1 <= kmem_end;
bool kptr2 = arg2 >= kmem_start && arg2 <= kmem_end;
if (kptr1 && kptr2)
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index 5ede578f1..cf01327a7 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -48,10 +48,10 @@ bool event_isset(event_t* ev)
return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}
-bool event_timedwait(event_t* ev, uint64_t timeout_ms)
+bool event_timedwait(event_t* ev, uint64 timeout_ms)
{
- uint64_t start = current_time_ms();
- uint64_t now = start;
+ uint64 start = current_time_ms();
+ uint64 now = start;
for (;;) {
timespec ts = {};
ts.tv_sec = 0;
diff --git a/executor/executor_posix.h b/executor/executor_posix.h
index e9b06b807..3e8b78b3e 100644
--- a/executor/executor_posix.h
+++ b/executor/executor_posix.h
@@ -61,14 +61,14 @@ bool event_isset(event_t* ev)
return res;
}
-bool event_timedwait(event_t* ev, uint64_t timeout_ms)
+bool event_timedwait(event_t* ev, uint64 timeout_ms)
{
pthread_mutex_lock(&ev->mu);
- uint64_t start = current_time_ms();
+ uint64 start = current_time_ms();
for (;;) {
if (ev->state)
break;
- uint64_t now = current_time_ms();
+ uint64 now = current_time_ms();
if (now - start > timeout_ms)
break;
timespec ts;
diff --git a/executor/executor_windows.cc b/executor/executor_windows.cc
index 90091c833..778387b42 100644
--- a/executor/executor_windows.cc
+++ b/executor/executor_windows.cc
@@ -14,7 +14,7 @@
#include "syscalls_windows.h"
-uint32_t output;
+uint32 output;
int main(int argc, char** argv)
{
@@ -50,17 +50,17 @@ void cover_reset(thread_t* th)
{
}
-uint64_t read_cover_size(thread_t* th)
+uint64 read_cover_size(thread_t* th)
{
return 0;
}
-uint32_t* write_output(uint32_t v)
+uint32* write_output(uint32 v)
{
return &output;
}
-void write_completed(uint32_t completed)
+void write_completed(uint32 completed)
{
}
diff --git a/executor/executor_windows.h b/executor/executor_windows.h
index 5b788e095..b29095011 100644
--- a/executor/executor_windows.h
+++ b/executor/executor_windows.h
@@ -56,14 +56,14 @@ bool event_isset(event_t* ev)
return res;
}
-bool event_timedwait(event_t* ev, uint64_t timeout_ms)
+bool event_timedwait(event_t* ev, uint64 timeout_ms)
{
EnterCriticalSection(&ev->cs);
- uint64_t start = current_time_ms();
+ uint64 start = current_time_ms();
for (;;) {
if (ev->state)
break;
- uint64_t now = current_time_ms();
+ uint64 now = current_time_ms();
if (now - start > timeout_ms)
break;
SleepConditionVariableCS(&ev->cv, &ev->cs, timeout_ms - (now - start));
diff --git a/executor/test_executor_linux.cc b/executor/test_executor_linux.cc
index 5ba8d91ca..01180624b 100644
--- a/executor/test_executor_linux.cc
+++ b/executor/test_executor_linux.cc
@@ -15,12 +15,12 @@ void loop()
extern "C" int test_copyin()
{
unsigned char x[4] = {};
- STORE_BY_BITMASK(uint16_t, &x[1], 0x1234, 0, 0);
+ STORE_BY_BITMASK(uint16, &x[1], 0x1234, 0, 0);
if (x[0] != 0 || x[1] != 0x34 || x[2] != 0x12 || x[3] != 0) {
printf("bad result of STORE_BY_BITMASK(0, 0): %x %x %x %x\n", x[0], x[1], x[2], x[3]);
return 1;
}
- STORE_BY_BITMASK(uint16_t, &x[1], 0x555a, 5, 4);
+ STORE_BY_BITMASK(uint16, &x[1], 0x555a, 5, 4);
if (x[0] != 0 || x[1] != 0x54 || x[2] != 0x13 || x[3] != 0) {
printf("bad result of STORE_BY_BITMASK(7, 3): %x %x %x %x\n", x[0], x[1], x[2], x[3]);
return 1;
@@ -33,7 +33,7 @@ extern "C" int test_copyin()
struct csum_inet_test {
const char* data;
size_t length;
- uint16_t csum;
+ uint16 csum;
};
extern "C" int test_csum_inet()
@@ -149,7 +149,7 @@ extern "C" int test_csum_inet()
for (unsigned i = 0; i < ARRAY_SIZE(tests); i++) {
struct csum_inet csum;
csum_inet_init(&csum);
- csum_inet_update(&csum, (const uint8_t*)tests[i].data, tests[i].length);
+ csum_inet_update(&csum, (const uint8*)tests[i].data, tests[i].length);
if (csum_inet_digest(&csum) != tests[i].csum) {
fprintf(stderr, "bad checksum in test #%u, want: %hx, got: %hx\n", i, tests[i].csum, csum_inet_digest(&csum));
return 1;
@@ -166,7 +166,7 @@ int randInt(int start, int end)
extern "C" int test_csum_inet_acc()
{
- uint8_t buffer[128];
+ uint8 buffer[128];
int test;
for (test = 0; test < 256; test++) {