From b5df78dc5d994bc61f1ecee2c5c85313178f392e Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Thu, 22 Nov 2018 19:04:06 +0100
Subject: all: support extra coverage

Right now syzkaller only supports coverage collected from the threads
that execute syscalls. However, some useful things happen in background
threads, and it would be nice to collect coverage from those threads as
well.

This change adds extra coverage support to syzkaller. This coverage is
not associated with a particular syscall, but rather with the whole
program. The executor passes extra coverage to syz-fuzzer over the same
ipc mechanism, with the syscall number set to -1. syz-fuzzer then passes
this coverage on to syz-manager under the call name "extra".

This change requires the following kcov patch:
https://github.com/xairy/linux/pull/2
---
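Note (illustration, not part of the change itself): the executor_linux.h
hunks below add a kcov_remote_arg descriptor and a KCOV_REMOTE_ENABLE
ioctl. As a rough sketch of how that API is driven, here is a minimal
standalone program performing the same init/mmap/remote-enable sequence
as cover_open()/cover_enable() in this patch, hardcoded for procid 0 on
a 64-bit kernel. It assumes the prototype kcov kernel patch linked
above; the uapi there may still change.

// Sketch only: drives the prototype KCOV_REMOTE_ENABLE api the same way
// cover_open()/cover_enable() below do, for procid 0 on a 64-bit kernel.
// Build with g++; needs a kernel with the kcov patch linked above.
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE64 _IOR('c', 1, __u64)
#define KCOV_DISABLE _IO('c', 101)

template <int N>
struct kcov_remote_arg {
	unsigned trace_mode;
	unsigned area_size;
	unsigned num_handles;
	__u64 common_handle;
	__u64 handles[N];
};

#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg<0>)
#define KCOV_REMOTE_HANDLE_USB 0x4242000000000000ull

int main()
{
	const unsigned long kExtraCoverSize = 256 << 10;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		return 1;
	// Size the coverage buffer (in 64-bit words) and map it.
	if (ioctl(fd, KCOV_INIT_TRACE64, kExtraCoverSize))
		return 1;
	void* mem = mmap(NULL, kExtraCoverSize * 8, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		return 1;
	__u64* data = (__u64*)mem;
	// Subscribe to coverage from remote (background) kernel threads:
	// here the USB handle for bus 0, i.e. kcov_remote_handle_usb(0).
	struct kcov_remote_arg<1> arg;
	memset(&arg, 0, sizeof(arg));
	arg.trace_mode = 0; // KCOV_TRACE_PC
	arg.area_size = kExtraCoverSize * 8;
	arg.num_handles = 1;
	arg.handles[0] = KCOV_REMOTE_HANDLE_USB + 0;
	arg.common_handle = 0 + 1; // procid + 1
	if (ioctl(fd, KCOV_REMOTE_ENABLE, &arg))
		return 1;
	sleep(1); // let background kernel activity (e.g. USB events) run
	printf("collected %llu PCs\n", (unsigned long long)data[0]);
	ioctl(fd, KCOV_DISABLE, 0);
	close(fd);
	return 0;
}

As in the executor, data[0] holds the count of collected PCs and the PCs
themselves start at data[1], which is why write_coverage_signal() below
skips the first word of cov->data.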
 executor/common_linux.h   |  7 ++++-
 executor/executor.cc      | 71 ++++++++++++++++++++++++++++++++++-----------
 executor/executor_bsd.h   |  4 +--
 executor/executor_linux.h | 51 +++++++++++++++++++++++++++-------
 executor/nocover.h        |  4 +--
 5 files changed, 106 insertions(+), 31 deletions(-)

diff --git a/executor/common_linux.h b/executor/common_linux.h
index 608dc964c..78d1e82a1 100644
--- a/executor/common_linux.h
+++ b/executor/common_linux.h
@@ -1791,7 +1791,12 @@ static void sandbox_common()
 #endif
 
 	struct rlimit rlim;
-	rlim.rlim_cur = rlim.rlim_max = 200 << 20;
+#if SYZ_EXECUTOR
+	rlim.rlim_cur = rlim.rlim_max = (200 << 20) +
+					(kMaxThreads * kCoverSize + kExtraCoverSize) * sizeof(void*);
+#else
+	rlim.rlim_cur = rlim.rlim_max = (200 << 20);
+#endif
 	setrlimit(RLIMIT_AS, &rlim);
 	rlim.rlim_cur = rlim.rlim_max = 32 << 20;
 	setrlimit(RLIMIT_MEMLOCK, &rlim);
diff --git a/executor/executor.cc b/executor/executor.cc
index 72f19105c..bb0e8defd 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -58,6 +58,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
 const int kCoverFd = kOutPipeFd - kMaxThreads;
 const int kMaxArgs = 9;
 const int kCoverSize = 256 << 10;
+const int kExtraCoverSize = 256 << 10;
 const int kFailStatus = 67;
 const int kRetryStatus = 69;
 const int kErrorStatus = 68;
@@ -208,6 +209,8 @@ struct thread_t {
 static thread_t threads[kMaxThreads];
 static thread_t* last_scheduled;
 
+static cover_t extra_cov;
+
 struct res_t {
 	bool executed;
 	uint64 val;
@@ -288,6 +291,7 @@ static thread_t* schedule_call(int call_index, int call_num, bool colliding, uin
 static void handle_completion(thread_t* th);
 static void copyout_call_results(thread_t* th);
 static void write_call_output(thread_t* th, bool finished);
+static void write_extra_output();
 static void execute_call(thread_t* th);
 static void thread_create(thread_t* th, int id);
 static void* worker_thread(void* arg);
@@ -366,8 +370,11 @@ int main(int argc, char** argv)
 	if (flag_cover) {
 		for (int i = 0; i < kMaxThreads; i++) {
 			threads[i].cov.fd = kCoverFd + i;
-			cover_open(&threads[i].cov);
+			cover_open(&threads[i].cov, false);
 		}
+		cover_open(&extra_cov, true);
+		// Don't enable comps because we don't use them in the fuzzer yet.
+		cover_enable(&extra_cov, false, true);
 	}
 
 	int status = 0;
@@ -557,8 +564,11 @@ void execute_one()
 retry:
 	uint64* input_pos = (uint64*)input_data;
 
-	if (flag_cover && !colliding && !flag_threaded)
-		cover_enable(&threads[0].cov, flag_collect_comps);
+	if (flag_cover && !colliding) {
+		if (!flag_threaded)
+			cover_enable(&threads[0].cov, flag_collect_comps, false);
+		cover_reset(&extra_cov);
+	}
 
 	int call_index = 0;
 	for (;;) {
@@ -719,6 +729,7 @@ retry:
 				write_call_output(th, false);
 			}
 		}
+		write_extra_output();
 	}
 }
 
@@ -766,21 +777,21 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
 }
 
 #if SYZ_EXECUTOR_USES_SHMEM
-template <typename cover_t>
-void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover_count_pos)
+template <typename cover_data_t>
+void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover_count_pos)
 {
 	// Write out feedback signals.
 	// Currently it is code edges computed as xor of two subsequent basic block PCs.
-	cover_t* cover_data = ((cover_t*)th->cov.data) + 1;
+	cover_data_t* cover_data = ((cover_data_t*)cov->data) + 1;
 	uint32 nsig = 0;
-	cover_t prev = 0;
-	for (uint32 i = 0; i < th->cov.size; i++) {
-		cover_t pc = cover_data[i];
+	cover_data_t prev = 0;
+	for (uint32 i = 0; i < cov->size; i++) {
+		cover_data_t pc = cover_data[i];
 		if (!cover_check(pc)) {
 			debug("got bad pc: 0x%llx\n", (uint64)pc);
 			doexit(0);
 		}
-		cover_t sig = pc ^ prev;
+		cover_data_t sig = pc ^ prev;
 		prev = hash(pc);
 		if (dedup(sig))
 			continue;
@@ -793,9 +804,9 @@ void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover
 	if (!flag_collect_cover)
 		return;
 	// Write out real coverage (basic block PCs).
-	uint32 cover_size = th->cov.size;
+	uint32 cover_size = cov->size;
 	if (flag_dedup_cover) {
-		cover_t* end = cover_data + cover_size;
+		cover_data_t* end = cover_data + cover_size;
 		std::sort(cover_data, end);
 		cover_size = std::unique(cover_data, end) - cover_data;
 	}
@@ -814,8 +825,10 @@ void handle_completion(thread_t* th)
 		     event_isset(&th->ready), event_isset(&th->done), th->executing);
 	if (th->res != (long)-1)
 		copyout_call_results(th);
-	if (!collide && !th->colliding)
+	if (!collide && !th->colliding) {
 		write_call_output(th, true);
+		write_extra_output();
+	}
 	th->executing = false;
 	running--;
 	if (running < 0)
@@ -894,9 +907,9 @@ void write_call_output(thread_t* th, bool finished)
 		*comps_count_pos = comps_size;
 	} else if (flag_cover) {
 		if (is_kernel_64_bit)
-			write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
 		else
-			write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos);
 	}
 	debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n",
 		      completed, th->call_index, th->call_num, reserrno, finished, blocked,
@@ -922,6 +935,32 @@ void write_call_output(thread_t* th, bool finished)
 #endif
 }
 
+void write_extra_output()
+{
+#if SYZ_EXECUTOR_USES_SHMEM
+	if (!flag_cover || flag_collect_comps)
+		return;
+	cover_collect(&extra_cov);
+	if (!extra_cov.size)
+		return;
+	write_output(-1); // call index
+	write_output(-1); // call num
+	write_output(999); // errno
+	write_output(0); // call flags
+	uint32* signal_count_pos = write_output(0); // filled in later
+	uint32* cover_count_pos = write_output(0); // filled in later
+	write_output(0); // comps_count_pos
+	if (is_kernel_64_bit)
+		write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos);
+	else
+		write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos);
+	cover_reset(&extra_cov);
+	debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos);
+	completed++;
+	write_completed(completed);
+#endif
+}
+
 void thread_create(thread_t* th, int id)
 {
 	th->created = true;
@@ -939,7 +978,7 @@ void* worker_thread(void* arg)
 	thread_t* th = (thread_t*)arg;
 
 	if (flag_cover)
-		cover_enable(&th->cov, flag_collect_comps);
+		cover_enable(&th->cov, flag_collect_comps, false);
 	for (;;) {
 		event_wait(&th->ready);
 		event_reset(&th->ready);
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index e0fce2c1e..3af57fa5b 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -54,7 +54,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs])
 
 #if GOOS_freebsd || GOOS_openbsd
 
-static void cover_open(cover_t* cov)
+static void cover_open(cover_t* cov, bool extra)
 {
 	int fd = open("/dev/kcov", O_RDWR);
 	if (fd == -1)
@@ -85,7 +85,7 @@ static void cover_open(cover_t* cov)
 	cov->data_end = cov->data + mmap_alloc_size;
 }
 
-static void cover_enable(cover_t* cov, bool collect_comps)
+static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
 	int kcov_mode = collect_comps ? KCOV_MODE_TRACE_CMP : KCOV_MODE_TRACE_PC;
 #if GOOS_freebsd
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index c7af48144..68b143ba0 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -11,13 +11,30 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+const unsigned long KCOV_TRACE_PC = 0;
+const unsigned long KCOV_TRACE_CMP = 1;
+
+template <int N>
+struct kcov_remote_arg {
+	unsigned trace_mode;
+	unsigned area_size;
+	unsigned num_handles;
+	__u64 common_handle;
+	__u64 handles[N];
+};
+
 #define KCOV_INIT_TRACE32 _IOR('c', 1, uint32)
 #define KCOV_INIT_TRACE64 _IOR('c', 1, uint64)
 #define KCOV_ENABLE _IO('c', 100)
 #define KCOV_DISABLE _IO('c', 101)
+#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg<0>)
 
-const unsigned long KCOV_TRACE_PC = 0;
-const unsigned long KCOV_TRACE_CMP = 1;
+#define KCOV_REMOTE_HANDLE_USB 0x4242000000000000ull
+
+static inline __u64 kcov_remote_handle_usb(int bus)
+{
+	return KCOV_REMOTE_HANDLE_USB + (__u64)bus;
+}
 
 static bool detect_kernel_bitness();
 
@@ -38,7 +55,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs])
 	return syscall(c->sys_nr, a[0], a[1], a[2], a[3], a[4], a[5]);
 }
 
-static void cover_open(cover_t* cov)
+static void cover_open(cover_t* cov, bool extra)
 {
 	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 	if (fd == -1)
@@ -47,9 +64,10 @@ static void cover_open(cover_t* cov)
 		fail("filed to dup2(%d, %d) cover fd", fd, cov->fd);
 	close(fd);
 	const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32;
-	if (ioctl(cov->fd, kcov_init_trace, kCoverSize))
+	const int cover_size = extra ? kExtraCoverSize : kCoverSize;
+	if (ioctl(cov->fd, kcov_init_trace, cover_size))
 		fail("cover init trace write failed");
-	size_t mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
+	size_t mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
 	cov->data = (char*)mmap(NULL, mmap_alloc_size, PROT_READ | PROT_WRITE,
 				MAP_SHARED, cov->fd, 0);
 	if (cov->data == MAP_FAILED)
@@ -57,15 +75,28 @@ static void cover_open(cover_t* cov)
 	cov->data_end = cov->data + mmap_alloc_size;
 }
 
-static void cover_enable(cover_t* cov, bool collect_comps)
+static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
 	int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
 	// This should be fatal,
 	// but in practice ioctl fails with assorted errors (9, 14, 25),
 	// so we use exitf.
-	if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
-		exitf("cover enable write trace failed, mode=%d", kcov_mode);
-	current_cover = cov;
+	if (!extra) {
+		if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
+			exitf("cover enable write trace failed, mode=%d", kcov_mode);
+		current_cover = cov;
+		return;
+	}
+	struct kcov_remote_arg<1> arg;
+	memset(&arg, 0, sizeof(arg));
+	arg.trace_mode = kcov_mode;
+	// Coverage buffer size of remote threads.
+	arg.area_size = kExtraCoverSize * (is_kernel_64_bit ? 8 : 4);
+	arg.num_handles = 1;
+	arg.handles[0] = kcov_remote_handle_usb(procid);
+	arg.common_handle = procid + 1;
+	if (ioctl(cov->fd, KCOV_REMOTE_ENABLE, &arg))
+		exitf("cover enable write trace failed");
 }
 
 static void cover_reset(cover_t* cov)
diff --git a/executor/nocover.h b/executor/nocover.h
index 94f3707f0..3b23f66ab 100644
--- a/executor/nocover.h
+++ b/executor/nocover.h
@@ -1,11 +1,11 @@
 // Copyright 2018 syzkaller project authors. All rights reserved.
 // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
 
-static void cover_open(cover_t* cov)
+static void cover_open(cover_t* cov, bool extra)
 {
 }
 
-static void cover_enable(cover_t* cov, bool collect_comps)
+static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
 }
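A further illustrative note, not from the patch: write_extra_output()
reuses the regular per-call reply layout in the output shmem, so a
consumer only needs the header fields to tell extra coverage apart from
syscall results. A hypothetical reader-side declaration follows, with
invented names; the field order simply mirrors the write_output() calls
above.

// Hypothetical (not an actual syzkaller declaration): header of one
// reply record in the output shmem, in the order write_extra_output()
// emits the fields.
#include <stdint.h>

struct reply_header_sketch {
	uint32_t call_index; // (uint32_t)-1 marks an extra-coverage record
	uint32_t call_num; // (uint32_t)-1 as well
	uint32_t reserrno; // 999 for extra-coverage records
	uint32_t call_flags; // 0
	uint32_t signal_size; // number of feedback signals that follow
	uint32_t cover_size; // number of coverage PCs after the signals
	uint32_t comps_size; // always 0: comps are not collected as extra
	// signal_size signal entries, then cover_size PC entries, follow.
};

static inline bool is_extra_record(const reply_header_sketch* r)
{
	return r->call_index == (uint32_t)-1 && r->call_num == (uint32_t)-1;
}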