aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2021-12-02 10:39:37 +0000
committerAleksandr Nogikh <wp32pw@gmail.com>2021-12-03 12:32:03 +0100
commitc7c20675f58e3edaa53538928c0963144fd524e5 (patch)
tree0552f0896774be7b0d933e95432e63ff8ef13ba4
parent6df0f018b545aaf2b9bbcfde8b6e530cf90da9be (diff)
executor: reserve fds that will belong to kcov
As kcov instances may now get set up during fuzzing, performing dup2 in cover_open is no longer safe, as it may close some important resource. Prevent that by reserving most of the fds that belong to the kcov fd range. Unfortunately we must duplicate the code because of the way kcov implementations are organized.
-rw-r--r--executor/executor.cc11
-rw-r--r--executor/executor_bsd.h8
-rw-r--r--executor/executor_darwin.h8
-rw-r--r--executor/executor_linux.h8
-rw-r--r--executor/nocover.h4
5 files changed, 36 insertions, 3 deletions
diff --git a/executor/executor.cc b/executor/executor.cc
index ac54ce62e..bfda1cbd6 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -68,6 +68,7 @@ typedef unsigned char uint8;
const int kMaxFd = 250;
const int kMaxThreads = 32;
const int kPreallocCoverThreads = 3; // the number of kcov instances to be set up during init
+const int kReserveCoverFds = 16; // a compromise between extra fds and the likely needed kcov instances
const int kInPipeFd = kMaxFd - 1; // remapped from stdin
const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
const int kCoverFd = kOutPipeFd - kMaxThreads;
@@ -470,10 +471,14 @@ int main(int argc, char** argv)
if (flag_coverage) {
for (int i = 0; i < kMaxThreads; i++) {
threads[i].cov.fd = kCoverFd + i;
- // Pre-setup coverage collection for some threads. This should be enough for almost
- // all programs, for the remaning few ones coverage will be set up when it's needed.
- if (i < kPreallocCoverThreads)
+ if (i < kPreallocCoverThreads) {
+			// Pre-setup coverage collection for some threads. This should be enough for almost
+			// all programs, for the remaining few ones coverage will be set up when it's needed.
thread_setup_cover(&threads[i]);
+ } else if (i < kReserveCoverFds) {
+ // Ensure that these fds won't be taken during fuzzing or by init routines.
+ cover_reserve_fd(&threads[i].cov);
+ }
}
cover_open(&extra_cov, true);
cover_protect(&extra_cov);
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index db02c61b7..80b56f317 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -164,6 +164,14 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint64*)cov->data;
}
+static void cover_reserve_fd(cover_t* cov)
+{
+ int fd = open("/dev/null", O_RDONLY);
+ if (fd < 0)
+ fail("failed to open /dev/null");
+ dup2(fd, cov->fd);
+}
+
static bool use_cover_edges(uint64 pc)
{
return true;
diff --git a/executor/executor_darwin.h b/executor/executor_darwin.h
index c16691663..d6efe0063 100644
--- a/executor/executor_darwin.h
+++ b/executor/executor_darwin.h
@@ -121,3 +121,11 @@ static bool use_cover_edges(uint64 pc)
{
return true;
}
+
+static void cover_reserve_fd(cover_t* cov)
+{
+ int fd = open("/dev/null", O_RDONLY);
+ if (fd < 0)
+ fail("failed to open /dev/null");
+ dup2(fd, cov->fd);
+}
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index bd43f2a5b..7d2780c0f 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -147,6 +147,14 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint32*)cov->data;
}
+static void cover_reserve_fd(cover_t* cov)
+{
+ int fd = open("/dev/null", O_RDONLY);
+ if (fd < 0)
+ fail("failed to open /dev/null");
+ dup2(fd, cov->fd);
+}
+
static bool use_cover_edges(uint32 pc)
{
return true;
diff --git a/executor/nocover.h b/executor/nocover.h
index da0d0e2f7..e60d07fa9 100644
--- a/executor/nocover.h
+++ b/executor/nocover.h
@@ -21,6 +21,10 @@ static void cover_protect(cover_t* cov)
{
}
+static void cover_reserve_fd(cover_t* cov)
+{
+}
+
#if SYZ_EXECUTOR_USES_SHMEM
static void cover_unprotect(cover_t* cov)
{