From 9e332771aeaa54eed13a59574a21368f52da604c Mon Sep 17 00:00:00 2001 From: Aleksandr Nogikh Date: Fri, 12 Nov 2021 13:34:18 +0000 Subject: executor: introduce threads without coverage Experiments have shown that the amount of allocated memory has a very big impact on the syz-executor's performance (at least under Linux) - much bigger than was expected. One source of that extra virtual memory is kcov and, in fact, usually we don't need all 16 kcov handles we create. E.g. only 4 are enough for 99.5% of progs that syzkaller executes. The biggest consumer of threads - the collide mode - doesn't need kcov at all. Let kcov handle be an optional property of a thread, not a mandatory one. Allocate only 3 kcov instances initially (they'll be preserved over forks) and let the forked processes create other kcov instances if they happen to be needed. --- executor/executor.cc | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'executor') diff --git a/executor/executor.cc b/executor/executor.cc index 76b1d284a..6f3d52f54 100644 --- a/executor/executor.cc +++ b/executor/executor.cc @@ -66,7 +66,8 @@ typedef unsigned char uint8; // Note: zircon max fd is 256. // Some common_OS.h files know about this constant for RLIMIT_NOFILE. 
const int kMaxFd = 250; -const int kMaxThreads = 16; +const int kMaxThreads = 32; +const int kPreallocCoverThreads = 3; // the number of kcov instances to be set up during init const int kInPipeFd = kMaxFd - 1; // remapped from stdin const int kOutPipeFd = kMaxFd - 2; // remapped from stdout const int kCoverFd = kOutPipeFd - kMaxThreads; @@ -242,6 +243,7 @@ struct thread_t { uint32 reserrno; bool fault_injected; cover_t cov; + bool cov_initialized; bool soft_fail_state; }; @@ -342,7 +344,8 @@ static void copyout_call_results(thread_t* th); static void write_call_output(thread_t* th, bool finished); static void write_extra_output(); static void execute_call(thread_t* th); -static void thread_create(thread_t* th, int id); +static void thread_create(thread_t* th, int id, bool need_coverage); +static void thread_setup_cover(thread_t* th); static void* worker_thread(void* arg); static uint64 read_input(uint64** input_posp, bool peek = false); static uint64 read_arg(uint64** input_posp); @@ -450,8 +453,10 @@ int main(int argc, char** argv) if (flag_coverage) { for (int i = 0; i < kMaxThreads; i++) { threads[i].cov.fd = kCoverFd + i; - cover_open(&threads[i].cov, false); - cover_protect(&threads[i].cov); + // Pre-setup coverage collection for some threads. This should be enough for almost + // all programs, for the remaining few, coverage will be set up when it's needed. 
+ if (i < kPreallocCoverThreads) + thread_setup_cover(&threads[i]); } cover_open(&extra_cov, true); cover_protect(&extra_cov); @@ -881,7 +886,7 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop for (; i < kMaxThreads; i++) { thread_t* th = &threads[i]; if (!th->created) - thread_create(th, i); + thread_create(th, i, flag_coverage && !colliding); if (event_isset(&th->done)) { if (th->executing) handle_completion(th); @@ -1115,11 +1120,15 @@ void write_extra_output() #endif } -void thread_create(thread_t* th, int id) +void thread_create(thread_t* th, int id, bool need_coverage) { th->created = true; th->id = id; th->executing = false; + // Lazily set up coverage collection. + // It is assumed that it has usually already been initialized - with a few rare exceptions. + if (need_coverage) + thread_setup_cover(th); event_init(&th->ready); event_init(&th->done); event_set(&th->done); @@ -1127,6 +1136,15 @@ void thread_create(thread_t* th, int id) thread_start(worker_thread, th); } +void thread_setup_cover(thread_t* th) +{ + if (th->cov_initialized) + return; + cover_open(&th->cov, false); + cover_protect(&th->cov); + th->cov_initialized = true; +} + void* worker_thread(void* arg) { thread_t* th = (thread_t*)arg; -- cgit mrf-deployment