aboutsummaryrefslogtreecommitdiffstats
path: root/executor
diff options
context:
space:
mode:
authorDylan Yudaken <dyudaken@gmail.com>2023-07-25 20:34:02 +0100
committerAleksandr Nogikh <nogikh@google.com>2023-07-30 13:35:50 +0000
commit84487a6f58dad25a72c356bd8a8ba455a87ae663 (patch)
tree560f827e12c98e0cc17412b3a869f56319622bb7 /executor
parent458a107b4b78803973245909f1f3ab19081ca63b (diff)
sys/io_uring, executor/common_linux: remove sqes_index in syz_io_uring_submit
This parameter barely increases coverage since the tail is always set to the entry that is written, but it does increase the complexity of the api and seems to reduce coverage when I run it locally. Remove it.
Diffstat (limited to 'executor')
-rw-r--r--executor/common_linux.h29
1 file changed, 12 insertions, 17 deletions
diff --git a/executor/common_linux.h b/executor/common_linux.h
index 62f0b3f2c..42d352a52 100644
--- a/executor/common_linux.h
+++ b/executor/common_linux.h
@@ -1957,6 +1957,10 @@ static long syz_io_uring_setup(volatile long a0, volatile long a1, volatile long
uint32 sqes_sz = setup_params->sq_entries * SIZEOF_IO_URING_SQE;
*sqes_ptr_out = mmap(0, sqes_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd_io_uring, IORING_OFF_SQES);
+ uint32* array = (uint32*)((uintptr_t)*ring_ptr_out + setup_params->sq_off.array);
+ for (uint32 index = 0; index < entries; index++)
+ array[index] = index;
+
return fd_io_uring;
}
@@ -1964,40 +1968,31 @@ static long syz_io_uring_setup(volatile long a0, volatile long a1, volatile long
#if SYZ_EXECUTOR || __NR_syz_io_uring_submit
-static long syz_io_uring_submit(volatile long a0, volatile long a1, volatile long a2, volatile long a3)
+static long syz_io_uring_submit(volatile long a0, volatile long a1, volatile long a2)
{
- // syzlang: syz_io_uring_submit(ring_ptr ring_ptr, sqes_ptr sqes_ptr, sqe ptr[in, io_uring_sqe], sqes_index int32)
- // C: syz_io_uring_submit(char* ring_ptr, io_uring_sqe* sqes_ptr, io_uring_sqe* sqe, uint32 sqes_index)
+ // syzlang: syz_io_uring_submit(ring_ptr ring_ptr, sqes_ptr sqes_ptr, sqe ptr[in, io_uring_sqe])
+ // C: syz_io_uring_submit(char* ring_ptr, io_uring_sqe* sqes_ptr, io_uring_sqe* sqe)
// It is not checked if the ring is full
// Cast to original
char* ring_ptr = (char*)a0; // This will be exposed to offsets in bytes
char* sqes_ptr = (char*)a1;
- char* sqe = (char*)a2;
- uint32 sqes_index = (uint32)a3;
- uint32 sq_ring_entries = *(uint32*)(ring_ptr + SQ_RING_ENTRIES_OFFSET);
- uint32 cq_ring_entries = *(uint32*)(ring_ptr + CQ_RING_ENTRIES_OFFSET);
+ char* sqe = (char*)a2;
- // Compute the sq_array offset
- uint32 sq_array_off = (CQ_CQES_OFFSET + cq_ring_entries * SIZEOF_IO_URING_CQE + 63) & ~63;
+ uint32 sq_ring_mask = *(uint32*)(ring_ptr + SQ_RING_MASK_OFFSET);
+ uint32* sq_tail_ptr = (uint32*)(ring_ptr + SQ_TAIL_OFFSET);
+ uint32 sq_tail = *sq_tail_ptr & sq_ring_mask;
// Get the ptr to the destination for the sqe
- if (sq_ring_entries)
- sqes_index %= sq_ring_entries;
- char* sqe_dest = sqes_ptr + sqes_index * SIZEOF_IO_URING_SQE;
+ char* sqe_dest = sqes_ptr + sq_tail * SIZEOF_IO_URING_SQE;
// Write the sqe entry to its destination in sqes
memcpy(sqe_dest, sqe, SIZEOF_IO_URING_SQE);
// Write the index to the sqe array
- uint32 sq_ring_mask = *(uint32*)(ring_ptr + SQ_RING_MASK_OFFSET);
- uint32* sq_tail_ptr = (uint32*)(ring_ptr + SQ_TAIL_OFFSET);
- uint32 sq_tail = *sq_tail_ptr & sq_ring_mask;
uint32 sq_tail_next = *sq_tail_ptr + 1;
- uint32* sq_array = (uint32*)(ring_ptr + sq_array_off);
- *(sq_array + sq_tail) = sqes_index;
// Advance the tail. Tail is a free-flowing integer and relies on natural wrapping.
// Ensure that the kernel will never see a tail update without the preceding SQE