about summary refs log tree commit diff stats
path: root/executor
diff options
context:
space:
mode:
Diffstat (limited to 'executor')
-rw-r--r--executor/executor.cc27
-rw-r--r--executor/executor_bsd.h23
-rw-r--r--executor/executor_darwin.h18
-rw-r--r--executor/executor_linux.h32
-rw-r--r--executor/nocover.h2
5 files changed, 46 insertions, 56 deletions
diff --git a/executor/executor.cc b/executor/executor.cc
index bfda1cbd6..b11faa684 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -67,8 +67,7 @@ typedef unsigned char uint8;
// Some common_OS.h files know about this constant for RLIMIT_NOFILE.
const int kMaxFd = 250;
const int kMaxThreads = 32;
-const int kPreallocCoverThreads = 3; // the number of kcov instances to be set up during init
-const int kReserveCoverFds = 16; // a compromise between extra fds and the likely needed kcov instances
+const int kPreMmapCoverThreads = 3; // the number of kcov instances to mmap during init
const int kInPipeFd = kMaxFd - 1; // remapped from stdin
const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
const int kCoverFd = kOutPipeFd - kMaxThreads;
@@ -233,6 +232,7 @@ struct call_t {
struct cover_t {
int fd;
uint32 size;
+ uint32 mmap_alloc_size;
char* data;
char* data_end;
// Note: On everything but darwin the first value in data is the count of
@@ -268,7 +268,6 @@ struct thread_t {
uint32 reserrno;
bool fault_injected;
cover_t cov;
- bool cov_initialized;
bool soft_fail_state;
};
@@ -370,7 +369,7 @@ static void write_call_output(thread_t* th, bool finished);
static void write_extra_output();
static void execute_call(thread_t* th);
static void thread_create(thread_t* th, int id, bool need_coverage);
-static void thread_setup_cover(thread_t* th);
+static void thread_mmap_cover(thread_t* th);
static void* worker_thread(void* arg);
static uint64 read_input(uint64** input_posp, bool peek = false);
static uint64 read_arg(uint64** input_posp);
@@ -471,16 +470,15 @@ int main(int argc, char** argv)
if (flag_coverage) {
for (int i = 0; i < kMaxThreads; i++) {
threads[i].cov.fd = kCoverFd + i;
- if (i < kPreallocCoverThreads) {
- // Pre-setup coverage collection for some threads. This should be enough for almost
+ cover_open(&threads[i].cov, false);
+ if (i < kPreMmapCoverThreads) {
+ // Pre-mmap coverage collection for some threads. This should be enough for almost
// all programs, for the remaining few ones coverage will be set up when it's needed.
- thread_setup_cover(&threads[i]);
- } else if (i < kReserveCoverFds) {
- // Ensure that these fds won't be taken during fuzzing or by init routines.
- cover_reserve_fd(&threads[i].cov);
+ thread_mmap_cover(&threads[i]);
}
}
cover_open(&extra_cov, true);
+ cover_mmap(&extra_cov);
cover_protect(&extra_cov);
if (flag_extra_coverage) {
// Don't enable comps because we don't use them in the fuzzer yet.
@@ -1194,7 +1192,7 @@ void thread_create(thread_t* th, int id, bool need_coverage)
// Lazily set up coverage collection.
// It is assumed that actually it's already initialized - with a few rare exceptions.
if (need_coverage)
- thread_setup_cover(th);
+ thread_mmap_cover(th);
event_init(&th->ready);
event_init(&th->done);
event_set(&th->done);
@@ -1202,13 +1200,12 @@ void thread_create(thread_t* th, int id, bool need_coverage)
thread_start(worker_thread, th);
}
-void thread_setup_cover(thread_t* th)
+void thread_mmap_cover(thread_t* th)
{
- if (th->cov_initialized)
+ if (th->cov.data != NULL)
return;
- cover_open(&th->cov, false);
+ cover_mmap(&th->cov);
cover_protect(&th->cov);
- th->cov_initialized = true;
}
void* worker_thread(void* arg)
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index 80b56f317..dc3ebbdd8 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -61,7 +61,7 @@ static void cover_open(cover_t* cov, bool extra)
#if GOOS_freebsd
if (ioctl(cov->fd, KIOSETBUFSIZE, kCoverSize))
fail("ioctl init trace write failed");
- size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
+ cov->mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
#elif GOOS_openbsd
unsigned long cover_size = kCoverSize;
if (ioctl(cov->fd, KIOSETBUFSIZE, &cover_size))
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)
if (ioctl(cov->fd, KIOREMOTEATTACH, &args))
fail("ioctl remote attach failed");
}
- size_t mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
+ cov->mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
#elif GOOS_netbsd
uint64_t cover_size;
if (extra) {
@@ -90,15 +90,20 @@ static void cover_open(cover_t* cov, bool extra)
if (ioctl(cov->fd, KCOV_IOC_SETBUFSIZE, &cover_size))
fail("ioctl init trace write failed");
}
- size_t mmap_alloc_size = cover_size * KCOV_ENTRY_SIZE;
+ cov->mmap_alloc_size = cover_size * KCOV_ENTRY_SIZE;
#endif
+}
- void* mmap_ptr = mmap(NULL, mmap_alloc_size, PROT_READ | PROT_WRITE,
+static void cover_mmap(cover_t* cov)
+{
+ if (cov->data != NULL)
+ fail("cover_mmap invoked on an already mmapped cover_t object");
+ void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
MAP_SHARED, cov->fd, 0);
if (mmap_ptr == MAP_FAILED)
fail("cover mmap failed");
cov->data = (char*)mmap_ptr;
- cov->data_end = cov->data + mmap_alloc_size;
+ cov->data_end = cov->data + cov->mmap_alloc_size;
cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
cov->pc_offset = 0;
}
@@ -164,14 +169,6 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint64*)cov->data;
}
-static void cover_reserve_fd(cover_t* cov)
-{
- int fd = open("/dev/null", O_RDONLY);
- if (fd < 0)
- fail("failed to open /dev/null");
- dup2(fd, cov->fd);
-}
-
static bool use_cover_edges(uint64 pc)
{
return true;
diff --git a/executor/executor_darwin.h b/executor/executor_darwin.h
index d6efe0063..d7c6c4574 100644
--- a/executor/executor_darwin.h
+++ b/executor/executor_darwin.h
@@ -68,10 +68,14 @@ static void cover_open(cover_t* cov, bool extra)
// and we don't care about the counters/nedges modes in XNU.
if (ksancov_mode_trace(cov->fd, max_entries))
fail("ioctl init trace write failed");
+}
+static void cover_mmap(cover_t* cov)
+{
+ if (cov->data != NULL)
+ fail("cover_mmap invoked on an already mmapped cover_t object");
uintptr_t mmap_ptr = 0;
- size_t mmap_alloc_size = 0;
- if (ksancov_map(cov->fd, &mmap_ptr, &mmap_alloc_size))
+ if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
fail("cover mmap failed");
// Sanity check to make sure our assumptions in the max_entries calculation
@@ -80,7 +84,7 @@ static void cover_open(cover_t* cov, bool extra)
fail("mmap allocation size larger than anticipated");
cov->data = (char*)mmap_ptr;
- cov->data_end = cov->data + mmap_alloc_size;
+ cov->data_end = cov->data + cov->mmap_alloc_size;
}
static void cover_protect(cover_t* cov)
@@ -121,11 +125,3 @@ static bool use_cover_edges(uint64 pc)
{
return true;
}
-
-static void cover_reserve_fd(cover_t* cov)
-{
- int fd = open("/dev/null", O_RDONLY);
- if (fd < 0)
- fail("failed to open /dev/null");
- dup2(fd, cov->fd);
-}
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index 7d2780c0f..8666d929b 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -85,14 +85,7 @@ static void cover_open(cover_t* cov, bool extra)
const int cover_size = extra ? kExtraCoverSize : kCoverSize;
if (ioctl(cov->fd, kcov_init_trace, cover_size))
fail("cover init trace write failed");
- size_t mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
- cov->data = (char*)mmap(NULL, mmap_alloc_size,
- PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0);
- if (cov->data == MAP_FAILED)
- fail("cover mmap failed");
- cov->data_end = cov->data + mmap_alloc_size;
- cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
- cov->pc_offset = 0;
+ cov->mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
}
static void cover_protect(cover_t* cov)
@@ -103,6 +96,21 @@ static void cover_unprotect(cover_t* cov)
{
}
+static void cover_mmap(cover_t* cov)
+{
+ if (cov->data != NULL)
+ fail("cover_mmap invoked on an already mmapped cover_t object");
+ if (cov->mmap_alloc_size == 0)
+ fail("cover_t structure is corrupted");
+ cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0);
+ if (cov->data == MAP_FAILED)
+ exitf("cover mmap failed");
+ cov->data_end = cov->data + cov->mmap_alloc_size;
+ cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+ cov->pc_offset = 0;
+}
+
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
unsigned int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
@@ -147,14 +155,6 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint32*)cov->data;
}
-static void cover_reserve_fd(cover_t* cov)
-{
- int fd = open("/dev/null", O_RDONLY);
- if (fd < 0)
- fail("failed to open /dev/null");
- dup2(fd, cov->fd);
-}
-
static bool use_cover_edges(uint32 pc)
{
return true;
diff --git a/executor/nocover.h b/executor/nocover.h
index e60d07fa9..f07f747b6 100644
--- a/executor/nocover.h
+++ b/executor/nocover.h
@@ -21,7 +21,7 @@ static void cover_protect(cover_t* cov)
{
}
-static void cover_reserve_fd(cover_t* cov)
+static void cover_mmap(cover_t* cov)
{
}