aboutsummaryrefslogtreecommitdiffstats
path: root/executor
diff options
context:
space:
mode:
author    Dmitry Vyukov <dvyukov@google.com> 2024-11-20 08:43:01 +0100
committer Dmitry Vyukov <dvyukov@google.com> 2024-11-20 11:33:58 +0000
commit    4fca1650892b7aba6ac219ce521543d411cf96ac (patch)
tree      668b39c6f72c509a44dabc13c0fcdcac386a1f81 /executor
parent    f56b4dcc82d7af38bf94d643c5750cf49a91a297 (diff)
executor: increase coverage buffer size
The coverage buffer frequently overflows. We cannot increase it radically b/c they consume lots of memory (num procs x num kcovs x buffer size) and lead to OOM kills (at least with 8 procs and 2GB KASAN VM). So increase it 2x and slightly reduce number of threads/kcov descriptors. However, in snapshot mode we can be more aggressive (only 1 proc). This reduces the number of overflows by ~2-4x depending on syscall.
Diffstat (limited to 'executor')
-rw-r--r--  executor/executor.cc       | 13
-rw-r--r--  executor/executor_linux.h  |  9
-rw-r--r--  executor/snapshot.h        |  2
3 files changed, 15 insertions, 9 deletions
diff --git a/executor/executor.cc b/executor/executor.cc
index 603f23f73..750858be2 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -72,13 +72,13 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
const int kCoverFd = kOutPipeFd - kMaxThreads;
const int kExtraCoverFd = kCoverFd - 1;
const int kMaxArgs = 9;
-const int kCoverSize = 256 << 10;
+const int kCoverSize = 512 << 10;
const int kFailStatus = 67;
// Two approaches of dealing with kcov memory.
-const int kCoverOptimizedCount = 12; // the number of kcov instances to be opened inside main()
+const int kCoverOptimizedCount = 8; // the max number of kcov instances
const int kCoverOptimizedPreMmap = 3; // this many will be mmapped inside main(), others - when needed.
-const int kCoverDefaultCount = 6; // otherwise we only init kcov instances inside main()
+const int kCoverDefaultCount = 6; // the max number of kcov instances when delayed kcov mmap is not available
// Logical error (e.g. invalid input program), use as an assert() alternative.
// If such error happens 10+ times in a row, it will be detected as a bug by the runner process.
@@ -1129,6 +1129,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov
// Currently it is code edges computed as xor of two subsequent basic block PCs.
fbb.StartVector(0, sizeof(uint64));
cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+ if ((char*)(cover_data + cov->size) > cov->data_end)
+ failmsg("too much cover", "cov=%u", cov->size);
uint32 nsig = 0;
cover_data_t prev_pc = 0;
bool prev_filter = true;
@@ -1468,11 +1470,8 @@ void execute_call(thread_t* th)
// Reset the flag before the first possible fail().
th->soft_fail_state = false;
- if (flag_coverage) {
+ if (flag_coverage)
cover_collect(&th->cov);
- if (th->cov.size >= kCoverSize)
- failmsg("too much cover", "thr=%d, cov=%u", th->id, th->cov.size);
- }
th->fault_injected = false;
if (th->call_props.fail_nth > 0)
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index f2e9d4df6..e952e6ea9 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -13,6 +13,12 @@
static bool pkeys_enabled;
+// The coverage buffer can realistically overflow. In the non-snapshot mode we cannot afford
+// very large buffer b/c there are usually multiple procs, and each of them consumes
+// significant amount of memory. In snapshot mode we have only one proc, so we can have
+// larger coverage buffer.
+const int kSnapshotCoverSize = 1024 << 10;
+
const unsigned long KCOV_TRACE_PC = 0;
const unsigned long KCOV_TRACE_CMP = 1;
@@ -101,7 +107,8 @@ static void cover_open(cover_t* cov, bool extra)
failmsg("filed to dup cover fd", "from=%d, to=%d", fd, cov->fd);
close(fd);
const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32;
- const int cover_size = extra ? kExtraCoverSize : kCoverSize;
+ const int cover_size = extra ? kExtraCoverSize : flag_snapshot ? kSnapshotCoverSize
+ : kCoverSize;
if (ioctl(cov->fd, kcov_init_trace, cover_size))
fail("cover init trace write failed");
cov->mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
diff --git a/executor/snapshot.h b/executor/snapshot.h
index 462fb2c56..059ac8222 100644
--- a/executor/snapshot.h
+++ b/executor/snapshot.h
@@ -147,7 +147,7 @@ constexpr size_t kOutputPopulate = 256 << 10;
constexpr size_t kInputPopulate = 64 << 10;
constexpr size_t kGlobalsPopulate = 4 << 10;
constexpr size_t kDataPopulate = 8 << 10;
-constexpr size_t kCoveragePopulate = 32 << 10;
+constexpr size_t kCoveragePopulate = 64 << 10;
constexpr size_t kThreadsPopulate = 2;
static void SnapshotSetState(rpc::SnapshotState state)