From b5df78dc5d994bc61f1ecee2c5c85313178f392e Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Thu, 22 Nov 2018 19:04:06 +0100
Subject: all: support extra coverage

Right now syzkaller only supports coverage collected from the threads
that execute syscalls. However, some useful things happen in background
threads, and it would be nice to collect coverage from those threads as
well.

This change adds extra coverage support to syzkaller. This coverage is
not associated with a particular syscall, but rather with the whole
program.

The executor passes extra coverage to syz-fuzzer over the same ipc
mechanism, with the syscall number set to -1. syz-fuzzer then passes
this coverage to syz-manager with the call name "extra".

This change requires the following kcov patch:
https://github.com/xairy/linux/pull/2
---
 executor/executor.cc | 71 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 55 insertions(+), 16 deletions(-)

(limited to 'executor/executor.cc')

diff --git a/executor/executor.cc b/executor/executor.cc
index 72f19105c..bb0e8defd 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -58,6 +58,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
 const int kCoverFd = kOutPipeFd - kMaxThreads;
 const int kMaxArgs = 9;
 const int kCoverSize = 256 << 10;
+const int kExtraCoverSize = 256 << 10;
 const int kFailStatus = 67;
 const int kRetryStatus = 69;
 const int kErrorStatus = 68;
@@ -208,6 +209,8 @@ struct thread_t {
 static thread_t threads[kMaxThreads];
 static thread_t* last_scheduled;
 
+static cover_t extra_cov;
+
 struct res_t {
 	bool executed;
 	uint64 val;
 };
@@ -288,6 +291,7 @@ static thread_t* schedule_call(int call_index, int call_num, bool colliding, uin
 static void handle_completion(thread_t* th);
 static void copyout_call_results(thread_t* th);
 static void write_call_output(thread_t* th, bool finished);
+static void write_extra_output();
 static void execute_call(thread_t* th);
 static void thread_create(thread_t* th, int id);
 static void* worker_thread(void* arg);
@@ -366,8 +370,11 @@ int main(int argc, char** argv)
 	if (flag_cover) {
 		for (int i = 0; i < kMaxThreads; i++) {
 			threads[i].cov.fd = kCoverFd + i;
-			cover_open(&threads[i].cov);
+			cover_open(&threads[i].cov, false);
 		}
+		cover_open(&extra_cov, true);
+		// Don't enable comps because we don't use them in the fuzzer yet.
+		cover_enable(&extra_cov, false, true);
 	}
 
 	int status = 0;
@@ -557,8 +564,11 @@ void execute_one()
 retry:
 	uint64* input_pos = (uint64*)input_data;
 
-	if (flag_cover && !colliding && !flag_threaded)
-		cover_enable(&threads[0].cov, flag_collect_comps);
+	if (flag_cover && !colliding) {
+		if (!flag_threaded)
+			cover_enable(&threads[0].cov, flag_collect_comps, false);
+		cover_reset(&extra_cov);
+	}
 
 	int call_index = 0;
 	for (;;) {
@@ -719,6 +729,7 @@ retry:
 				write_call_output(th, false);
 			}
 		}
+		write_extra_output();
 	}
 }
@@ -766,21 +777,21 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
 }
 
 #if SYZ_EXECUTOR_USES_SHMEM
-template <typename cover_t>
-void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover_count_pos)
+template <typename cover_data_t>
+void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover_count_pos)
 {
 	// Write out feedback signals.
 	// Currently it is code edges computed as xor of two subsequent basic block PCs.
-	cover_t* cover_data = ((cover_t*)th->cov.data) + 1;
+	cover_data_t* cover_data = ((cover_data_t*)cov->data) + 1;
 	uint32 nsig = 0;
-	cover_t prev = 0;
-	for (uint32 i = 0; i < th->cov.size; i++) {
-		cover_t pc = cover_data[i];
+	cover_data_t prev = 0;
+	for (uint32 i = 0; i < cov->size; i++) {
+		cover_data_t pc = cover_data[i];
 		if (!cover_check(pc)) {
 			debug("got bad pc: 0x%llx\n", (uint64)pc);
 			doexit(0);
 		}
-		cover_t sig = pc ^ prev;
+		cover_data_t sig = pc ^ prev;
 		prev = hash(pc);
 		if (dedup(sig))
 			continue;
@@ -793,9 +804,9 @@ void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover
 	if (!flag_collect_cover)
 		return;
 	// Write out real coverage (basic block PCs).
-	uint32 cover_size = th->cov.size;
+	uint32 cover_size = cov->size;
 	if (flag_dedup_cover) {
-		cover_t* end = cover_data + cover_size;
+		cover_data_t* end = cover_data + cover_size;
 		std::sort(cover_data, end);
 		cover_size = std::unique(cover_data, end) - cover_data;
 	}
@@ -814,8 +825,10 @@ void handle_completion(thread_t* th)
 		     event_isset(&th->ready), event_isset(&th->done), th->executing);
 	if (th->res != (long)-1)
 		copyout_call_results(th);
-	if (!collide && !th->colliding)
+	if (!collide && !th->colliding) {
 		write_call_output(th, true);
+		write_extra_output();
+	}
 	th->executing = false;
 	running--;
 	if (running < 0)
@@ -894,9 +907,9 @@ void write_call_output(thread_t* th, bool finished)
 		*comps_count_pos = comps_size;
 	} else if (flag_cover) {
 		if (is_kernel_64_bit)
-			write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
 		else
-			write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos);
 	}
 	debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n",
 		      completed, th->call_index, th->call_num, reserrno, finished, blocked,
@@ -922,6 +935,32 @@ void write_call_output(thread_t* th, bool finished)
 #endif
 }
 
+void write_extra_output()
+{
+#if SYZ_EXECUTOR_USES_SHMEM
+	if (!flag_cover || flag_collect_comps)
+		return;
+	cover_collect(&extra_cov);
+	if (!extra_cov.size)
+		return;
+	write_output(-1); // call index
+	write_output(-1); // call num
+	write_output(999); // errno
+	write_output(0); // call flags
+	uint32* signal_count_pos = write_output(0); // filled in later
+	uint32* cover_count_pos = write_output(0); // filled in later
+	write_output(0); // comps_count_pos
+	if (is_kernel_64_bit)
+		write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos);
+	else
+		write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos);
+	cover_reset(&extra_cov);
+	debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos);
+	completed++;
+	write_completed(completed);
+#endif
+}
+
 void thread_create(thread_t* th, int id)
 {
 	th->created = true;
@@ -939,7 +978,7 @@ void* worker_thread(void* arg)
 	thread_t* th = (thread_t*)arg;
 
 	if (flag_cover)
-		cover_enable(&th->cov, flag_collect_comps);
+		cover_enable(&th->cov, flag_collect_comps, false);
 	for (;;) {
 		event_wait(&th->ready);
 		event_reset(&th->ready);
-- 
cgit mrf-deployment
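
Note on the output record layout (not part of the patch): write_extra_output() above reuses the per-call record layout of write_call_output(), so a consumer of the executor's shared-memory output stream can only tell an extra-coverage record apart by the call index/num written as -1 (and the placeholder errno of 999). Below is a minimal C++ sketch of that check, assuming the field order of the write_output() calls in this patch; the struct and helper names are hypothetical illustrations, not syzkaller identifiers.

#include <stdint.h>

// Header words of one output record, in the order written by
// write_call_output()/write_extra_output() in the patch above.
struct call_record_header {
	uint32_t call_index;  // (uint32)-1 for extra coverage (no associated call)
	uint32_t call_num;    // (uint32)-1 for extra coverage, syscall number otherwise
	uint32_t reserrno;    // 999 placeholder for extra coverage
	uint32_t call_flags;
	uint32_t signal_size; // number of signal entries that follow
	uint32_t cover_size;  // number of coverage PCs that follow
	uint32_t comps_size;  // number of comparison records (0 for extra coverage)
};

static bool is_extra_coverage(const call_record_header& hdr)
{
	// Extra coverage is marked by call index/num set to -1,
	// as written by write_extra_output().
	return hdr.call_index == uint32_t(-1) && hdr.call_num == uint32_t(-1);
}

On the Go side, syz-fuzzer performs the equivalent check and reports such records under the call name "extra" to syz-manager, as described in the commit message.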