aboutsummaryrefslogtreecommitdiffstats
path: root/executor/executor.cc
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-07-05 17:20:13 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-07-25 13:12:57 +0000
commit4d77b9fe7da3d014943a16cb4b9a4ca3a531521a (patch)
treec37fbf8b50205eb8b830595a621ad4b355e32e9a /executor/executor.cc
parent206f31df2861c47b13a8c05a105afa94bcc7106c (diff)
all: add qemu snapshotting mode
Diffstat (limited to 'executor/executor.cc')
-rw-r--r--executor/executor.cc96
1 files changed, 62 insertions, 34 deletions
diff --git a/executor/executor.cc b/executor/executor.cc
index 17de4e87d..055957e9f 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -113,6 +113,8 @@ static void reply_execute(uint32 status);
static void receive_handshake();
#if SYZ_EXECUTOR_USES_FORK_SERVER
+static void SnapshotPrepareParent();
+
// Allocating (and forking) virtual memory for each executed process is expensive, so we only mmap
// the amount we might possibly need for the specific received prog.
const int kMaxOutputComparisons = 14 << 20; // executions with comparisons enabled are usually < 1% of all executions
@@ -143,6 +145,7 @@ struct alignas(8) OutputData {
std::atomic<uint32> size;
std::atomic<uint32> consumed;
std::atomic<uint32> completed;
+ std::atomic<uint32> num_calls;
struct {
// Call index in the test program (they may be out-of-order if some syscalls block).
int index;
@@ -155,6 +158,7 @@ struct alignas(8) OutputData {
size.store(0, std::memory_order_relaxed);
consumed.store(0, std::memory_order_relaxed);
completed.store(0, std::memory_order_relaxed);
+ num_calls.store(0, std::memory_order_relaxed);
}
};
@@ -248,6 +252,7 @@ static bool dedup(uint8 index, uint64 sig);
static uint64 start_time_ms = 0;
static bool flag_debug;
+static bool flag_snapshot;
static bool flag_coverage;
static bool flag_sandbox_none;
static bool flag_sandbox_setuid;
@@ -463,8 +468,10 @@ static bool copyout(char* addr, uint64 size, uint64* res);
static void setup_control_pipes();
static bool coverage_filter(uint64 pc);
static rpc::ComparisonRaw convert(const kcov_comparison_t& cmp);
-static flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id,
+static flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint32 num_calls,
uint64 elapsed, uint64 freshness, uint32 status, const std::vector<uint8_t>* process_output);
+static void parse_execute(const execute_req& req);
+static void parse_handshake(const handshake_req& req);
#include "syscalls.h"
@@ -495,6 +502,8 @@ static feature_t features[] = {};
#include "files.h"
#include "subprocess.h"
+#include "snapshot.h"
+
#include "executor_runner.h"
#include "test.h"
@@ -535,44 +544,50 @@ int main(int argc, char** argv)
start_time_ms = current_time_ms();
os_init(argc, argv, (char*)SYZ_DATA_OFFSET, SYZ_NUM_PAGES * SYZ_PAGE_SIZE);
+ use_temporary_dir();
+ install_segv_handler();
current_thread = &threads[0];
- void* mmap_out = mmap(NULL, kMaxInput, PROT_READ, MAP_SHARED, kInFd, 0);
- if (mmap_out == MAP_FAILED)
- fail("mmap of input file failed");
- input_data = static_cast<uint8*>(mmap_out);
+ if (argc > 2 && strcmp(argv[2], "snapshot") == 0) {
+ SnapshotSetup(argv, argc);
+ } else {
+ void* mmap_out = mmap(NULL, kMaxInput, PROT_READ, MAP_SHARED, kInFd, 0);
+ if (mmap_out == MAP_FAILED)
+ fail("mmap of input file failed");
+ input_data = static_cast<uint8*>(mmap_out);
+
+ mmap_output(kInitialOutput);
- mmap_output(kInitialOutput);
- // Prevent test programs to mess with these fds.
- // Due to races in collider mode, a program can e.g. ftruncate one of these fds,
- // which will cause fuzzer to crash.
- close(kInFd);
+ // Prevent test programs to mess with these fds.
+ // Due to races in collider mode, a program can e.g. ftruncate one of these fds,
+ // which will cause fuzzer to crash.
+ close(kInFd);
#if !SYZ_EXECUTOR_USES_FORK_SERVER
- close(kOutFd);
+ // For SYZ_EXECUTOR_USES_FORK_SERVER, close(kOutFd) is invoked in the forked child,
+ // after the program has been received.
+ close(kOutFd);
#endif
- // For SYZ_EXECUTOR_USES_FORK_SERVER, close(kOutFd) is invoked in the forked child,
- // after the program has been received.
- if (fcntl(kMaxSignalFd, F_GETFD) != -1) {
- // Use random addresses for coverage filters to not collide with output_data.
- max_signal.emplace(kMaxSignalFd, reinterpret_cast<void*>(0x110c230000ull));
- close(kMaxSignalFd);
- }
- if (fcntl(kCoverFilterFd, F_GETFD) != -1) {
- cover_filter.emplace(kCoverFilterFd, reinterpret_cast<void*>(0x110f230000ull));
- close(kCoverFilterFd);
- }
+ if (fcntl(kMaxSignalFd, F_GETFD) != -1) {
+ // Use random addresses for coverage filters to not collide with output_data.
+ max_signal.emplace(kMaxSignalFd, reinterpret_cast<void*>(0x110c230000ull));
+ close(kMaxSignalFd);
+ }
+ if (fcntl(kCoverFilterFd, F_GETFD) != -1) {
+ cover_filter.emplace(kCoverFilterFd, reinterpret_cast<void*>(0x110f230000ull));
+ close(kCoverFilterFd);
+ }
- use_temporary_dir();
- install_segv_handler();
- setup_control_pipes();
- receive_handshake();
+ setup_control_pipes();
+ receive_handshake();
#if !SYZ_EXECUTOR_USES_FORK_SERVER
- // We receive/reply handshake when fork server is disabled just to simplify runner logic.
- // It's a bit suboptimal, but no fork server is much slower anyway.
- reply_execute(0);
- receive_execute();
+ // We receive/reply handshake when fork server is disabled just to simplify runner logic.
+ // It's a bit suboptimal, but no fork server is much slower anyway.
+ reply_execute(0);
+ receive_execute();
#endif
+ }
+
if (flag_coverage) {
int create_count = kCoverDefaultCount, mmap_count = create_count;
if (flag_delay_kcov_mmap) {
@@ -694,6 +709,11 @@ void receive_handshake()
ssize_t n = read(kInPipeFd, &req, sizeof(req));
if (n != sizeof(req))
failmsg("handshake read failed", "read=%zu", n);
+ parse_handshake(req);
+}
+
+void parse_handshake(const handshake_req& req)
+{
if (req.magic != kInMagic)
failmsg("bad handshake magic", "magic=0x%llx", req.magic);
#if SYZ_HAVE_SANDBOX_ANDROID
@@ -732,6 +752,11 @@ void receive_execute()
;
if (n != (ssize_t)sizeof(req))
failmsg("control pipe read failed", "read=%zd want=%zd", n, sizeof(req));
+ parse_execute(req);
+}
+
+void parse_execute(const execute_req& req)
+{
request_id = req.id;
flag_collect_signal = req.exec_flags & (1 << 0);
flag_collect_cover = req.exec_flags & (1 << 1);
@@ -759,6 +784,8 @@ bool cover_collection_required()
void reply_execute(uint32 status)
{
+ if (flag_snapshot)
+ SnapshotDone(status == kFailStatus);
if (write(kOutPipeFd, &status, sizeof(status)) != sizeof(status))
fail("control pipe write failed");
}
@@ -781,7 +808,10 @@ void realloc_output_data()
void execute_one()
{
in_execute_one = true;
- realloc_output_data();
+ if (flag_snapshot)
+ SnapshotStart();
+ else
+ realloc_output_data();
output_builder.emplace(output_data, output_size);
uint64 start = current_time_ms();
uint8* input_pos = input_data;
@@ -1272,11 +1302,9 @@ void write_extra_output()
cover_reset(&extra_cov);
}
-flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint64 elapsed,
+flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint32 num_calls, uint64 elapsed,
uint64 freshness, uint32 status, const std::vector<uint8_t>* process_output)
{
- uint8* prog_data = input_data;
- uint32 num_calls = read_input(&prog_data);
int output_size = output->size.load(std::memory_order_relaxed) ?: kMaxOutput;
uint32 completed = output->completed.load(std::memory_order_relaxed);
completed = std::min(completed, kMaxCalls);