aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--executor/common.h11
-rw-r--r--executor/executor.cc96
-rw-r--r--executor/executor_runner.h3
-rw-r--r--executor/files.h16
-rw-r--r--executor/snapshot.h252
-rw-r--r--pkg/flatrpc/flatrpc.fbs53
-rw-r--r--pkg/flatrpc/flatrpc.go557
-rw-r--r--pkg/flatrpc/flatrpc.h520
-rw-r--r--pkg/flatrpc/helpers.go10
-rw-r--r--pkg/log/log.go4
-rw-r--r--pkg/mgrconfig/load.go44
-rw-r--r--pkg/rpcserver/rpcserver.go6
-rw-r--r--syz-manager/manager.go29
-rw-r--r--syz-manager/snapshot.go175
-rw-r--r--vm/qemu/qemu.go20
-rw-r--r--vm/qemu/snapshot_linux.go246
-rw-r--r--vm/qemu/snapshot_unimpl.go33
17 files changed, 2014 insertions, 61 deletions
diff --git a/executor/common.h b/executor/common.h
index a38768536..123723e5a 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -613,7 +613,8 @@ static void loop(void)
#endif
#if SYZ_EXECUTOR
// Tell parent that we are ready to serve.
- reply_execute(0);
+ if (!flag_snapshot)
+ reply_execute(0);
#endif
int iter = 0;
#if SYZ_REPEAT_TIMES
@@ -632,7 +633,8 @@ static void loop(void)
reset_loop();
#endif
#if SYZ_EXECUTOR
- receive_execute();
+ if (!flag_snapshot)
+ receive_execute();
#endif
int pid = fork();
if (pid < 0)
@@ -663,6 +665,11 @@ static void loop(void)
}
debug("spawned worker pid %d\n", pid);
+#if SYZ_EXECUTOR
+ if (flag_snapshot)
+ SnapshotPrepareParent();
+#endif
+
// We used to use sigtimedwait(SIGCHLD) to wait for the subprocess.
// But SIGCHLD is also delivered when a process stops/continues,
// so it would require a loop with status analysis and timeout recalculation.
diff --git a/executor/executor.cc b/executor/executor.cc
index 17de4e87d..055957e9f 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -113,6 +113,8 @@ static void reply_execute(uint32 status);
static void receive_handshake();
#if SYZ_EXECUTOR_USES_FORK_SERVER
+static void SnapshotPrepareParent();
+
// Allocating (and forking) virtual memory for each executed process is expensive, so we only mmap
// the amount we might possibly need for the specific received prog.
const int kMaxOutputComparisons = 14 << 20; // executions with comparsions enabled are usually < 1% of all executions
@@ -143,6 +145,7 @@ struct alignas(8) OutputData {
std::atomic<uint32> size;
std::atomic<uint32> consumed;
std::atomic<uint32> completed;
+ std::atomic<uint32> num_calls;
struct {
// Call index in the test program (they may be out-of-order is some syscalls block).
int index;
@@ -155,6 +158,7 @@ struct alignas(8) OutputData {
size.store(0, std::memory_order_relaxed);
consumed.store(0, std::memory_order_relaxed);
completed.store(0, std::memory_order_relaxed);
+ num_calls.store(0, std::memory_order_relaxed);
}
};
@@ -248,6 +252,7 @@ static bool dedup(uint8 index, uint64 sig);
static uint64 start_time_ms = 0;
static bool flag_debug;
+static bool flag_snapshot;
static bool flag_coverage;
static bool flag_sandbox_none;
static bool flag_sandbox_setuid;
@@ -463,8 +468,10 @@ static bool copyout(char* addr, uint64 size, uint64* res);
static void setup_control_pipes();
static bool coverage_filter(uint64 pc);
static rpc::ComparisonRaw convert(const kcov_comparison_t& cmp);
-static flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id,
+static flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint32 num_calls,
uint64 elapsed, uint64 freshness, uint32 status, const std::vector<uint8_t>* process_output);
+static void parse_execute(const execute_req& req);
+static void parse_handshake(const handshake_req& req);
#include "syscalls.h"
@@ -495,6 +502,8 @@ static feature_t features[] = {};
#include "files.h"
#include "subprocess.h"
+#include "snapshot.h"
+
#include "executor_runner.h"
#include "test.h"
@@ -535,44 +544,50 @@ int main(int argc, char** argv)
start_time_ms = current_time_ms();
os_init(argc, argv, (char*)SYZ_DATA_OFFSET, SYZ_NUM_PAGES * SYZ_PAGE_SIZE);
+ use_temporary_dir();
+ install_segv_handler();
current_thread = &threads[0];
- void* mmap_out = mmap(NULL, kMaxInput, PROT_READ, MAP_SHARED, kInFd, 0);
- if (mmap_out == MAP_FAILED)
- fail("mmap of input file failed");
- input_data = static_cast<uint8*>(mmap_out);
+ if (argc > 2 && strcmp(argv[2], "snapshot") == 0) {
+ SnapshotSetup(argv, argc);
+ } else {
+ void* mmap_out = mmap(NULL, kMaxInput, PROT_READ, MAP_SHARED, kInFd, 0);
+ if (mmap_out == MAP_FAILED)
+ fail("mmap of input file failed");
+ input_data = static_cast<uint8*>(mmap_out);
+
+ mmap_output(kInitialOutput);
- mmap_output(kInitialOutput);
- // Prevent test programs to mess with these fds.
- // Due to races in collider mode, a program can e.g. ftruncate one of these fds,
- // which will cause fuzzer to crash.
- close(kInFd);
+ // Prevent test programs to mess with these fds.
+ // Due to races in collider mode, a program can e.g. ftruncate one of these fds,
+ // which will cause fuzzer to crash.
+ close(kInFd);
#if !SYZ_EXECUTOR_USES_FORK_SERVER
- close(kOutFd);
+ // For SYZ_EXECUTOR_USES_FORK_SERVER, close(kOutFd) is invoked in the forked child,
+ // after the program has been received.
+ close(kOutFd);
#endif
- // For SYZ_EXECUTOR_USES_FORK_SERVER, close(kOutFd) is invoked in the forked child,
- // after the program has been received.
- if (fcntl(kMaxSignalFd, F_GETFD) != -1) {
- // Use random addresses for coverage filters to not collide with output_data.
- max_signal.emplace(kMaxSignalFd, reinterpret_cast<void*>(0x110c230000ull));
- close(kMaxSignalFd);
- }
- if (fcntl(kCoverFilterFd, F_GETFD) != -1) {
- cover_filter.emplace(kCoverFilterFd, reinterpret_cast<void*>(0x110f230000ull));
- close(kCoverFilterFd);
- }
+ if (fcntl(kMaxSignalFd, F_GETFD) != -1) {
+ // Use random addresses for coverage filters to not collide with output_data.
+ max_signal.emplace(kMaxSignalFd, reinterpret_cast<void*>(0x110c230000ull));
+ close(kMaxSignalFd);
+ }
+ if (fcntl(kCoverFilterFd, F_GETFD) != -1) {
+ cover_filter.emplace(kCoverFilterFd, reinterpret_cast<void*>(0x110f230000ull));
+ close(kCoverFilterFd);
+ }
- use_temporary_dir();
- install_segv_handler();
- setup_control_pipes();
- receive_handshake();
+ setup_control_pipes();
+ receive_handshake();
#if !SYZ_EXECUTOR_USES_FORK_SERVER
- // We receive/reply handshake when fork server is disabled just to simplify runner logic.
- // It's a bit suboptimal, but no fork server is much slower anyway.
- reply_execute(0);
- receive_execute();
+ // We receive/reply handshake when fork server is disabled just to simplify runner logic.
+ // It's a bit suboptimal, but no fork server is much slower anyway.
+ reply_execute(0);
+ receive_execute();
#endif
+ }
+
if (flag_coverage) {
int create_count = kCoverDefaultCount, mmap_count = create_count;
if (flag_delay_kcov_mmap) {
@@ -694,6 +709,11 @@ void receive_handshake()
ssize_t n = read(kInPipeFd, &req, sizeof(req));
if (n != sizeof(req))
failmsg("handshake read failed", "read=%zu", n);
+ parse_handshake(req);
+}
+
+void parse_handshake(const handshake_req& req)
+{
if (req.magic != kInMagic)
failmsg("bad handshake magic", "magic=0x%llx", req.magic);
#if SYZ_HAVE_SANDBOX_ANDROID
@@ -732,6 +752,11 @@ void receive_execute()
;
if (n != (ssize_t)sizeof(req))
failmsg("control pipe read failed", "read=%zd want=%zd", n, sizeof(req));
+ parse_execute(req);
+}
+
+void parse_execute(const execute_req& req)
+{
request_id = req.id;
flag_collect_signal = req.exec_flags & (1 << 0);
flag_collect_cover = req.exec_flags & (1 << 1);
@@ -759,6 +784,8 @@ bool cover_collection_required()
void reply_execute(uint32 status)
{
+ if (flag_snapshot)
+ SnapshotDone(status == kFailStatus);
if (write(kOutPipeFd, &status, sizeof(status)) != sizeof(status))
fail("control pipe write failed");
}
@@ -781,7 +808,10 @@ void realloc_output_data()
void execute_one()
{
in_execute_one = true;
- realloc_output_data();
+ if (flag_snapshot)
+ SnapshotStart();
+ else
+ realloc_output_data();
output_builder.emplace(output_data, output_size);
uint64 start = current_time_ms();
uint8* input_pos = input_data;
@@ -1272,11 +1302,9 @@ void write_extra_output()
cover_reset(&extra_cov);
}
-flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint64 elapsed,
+flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id, uint32 num_calls, uint64 elapsed,
uint64 freshness, uint32 status, const std::vector<uint8_t>* process_output)
{
- uint8* prog_data = input_data;
- uint32 num_calls = read_input(&prog_data);
int output_size = output->size.load(std::memory_order_relaxed) ?: kMaxOutput;
uint32 completed = output->completed.load(std::memory_order_relaxed);
completed = std::min(completed, kMaxCalls);
diff --git a/executor/executor_runner.h b/executor/executor_runner.h
index 260d4a5de..96fcc9b44 100644
--- a/executor/executor_runner.h
+++ b/executor/executor_runner.h
@@ -352,7 +352,8 @@ private:
output_.insert(output_.end(), tmp, tmp + strlen(tmp));
}
}
- auto data = finish_output(resp_mem_, id_, msg_->id, elapsed, freshness_++, status, output);
+ uint32 num_calls = read_input(&prog_data);
+ auto data = finish_output(resp_mem_, id_, msg_->id, num_calls, elapsed, freshness_++, status, output);
conn_.Send(data.data(), data.size());
resp_mem_->Reset();
diff --git a/executor/files.h b/executor/files.h
index f952a07dc..470157e84 100644
--- a/executor/files.h
+++ b/executor/files.h
@@ -9,6 +9,7 @@
#include <errno.h>
#include <fcntl.h>
#include <glob.h>
+#include <stdarg.h>
#include <string.h>
#include <unistd.h>
@@ -58,6 +59,21 @@ static std::unique_ptr<rpc::FileInfoRawT> ReadFile(const std::string& file)
return info;
}
+static std::string ReadTextFile(const char* file_fmt, ...)
+{
+ char file[1024];
+ va_list args;
+ va_start(args, file_fmt);
+ vsnprintf(file, sizeof(file), file_fmt, args);
+ va_end(args);
+ file[sizeof(file) - 1] = 0;
+ auto data = ReadFile(file)->data;
+ std::string str(data.begin(), data.end());
+ while (!str.empty() && (str.back() == '\n' || str.back() == 0))
+ str.resize(str.size() - 1);
+ return str;
+}
+
static std::vector<std::unique_ptr<rpc::FileInfoRawT>> ReadFiles(const std::vector<std::string>& files)
{
std::vector<std::unique_ptr<rpc::FileInfoRawT>> results;
diff --git a/executor/snapshot.h b/executor/snapshot.h
new file mode 100644
index 000000000..5479a162f
--- /dev/null
+++ b/executor/snapshot.h
@@ -0,0 +1,252 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#include <dirent.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <string>
+#include <utility>
+
+#ifndef MADV_POPULATE_WRITE
+#define MADV_POPULATE_WRITE 23
+#endif
+
+// Size of qemu snapshots and time required to restore a snapshot depend on the amount of memory
+// the VM touches after boot. For example, a 132 MB snapshot takes around 150ms to restore,
+// while a 260 MB snapshot takes around 275 ms to restore.
+//
+// To reduce size of the snapshot it's recommended to use smaller kernel and setup fewer devices.
+// For example the following cmdline arguments:
+// "loop.max_loop=1 dummy_hcd.num=1 vivid.n_devs=2 vivid.multiplanar=1,2 netrom.nr_ndevs=1 rose.rose_ndevs=1"
+// and CONFIG_USBIP_VHCI_NR_HCS=1 help to reduce snapshot by about 20 MB. Note: we have only 1 proc
+// in snapshot mode, so we don't need lots of devices. However, our descriptions rely on vivid.n_devs=16
+// since they hardcode names like /dev/video36 which follow after these 16 pre-created devices.
+//
+// Additionally we could try to use executor as init process, this should remove dhcpd/sshd/udevd/klogd/etc.
+// We don't need even networking in snapshot mode since we communicate via shared memory.
+
+static struct {
+ // Ivshmem interrupt doorbell register.
+ volatile uint32* doorbell;
+ volatile rpc::SnapshotHeaderT* hdr;
+ void* input;
+} ivs;
+
+// Finds qemu ivshmem device, see:
+// https://www.qemu.org/docs/master/specs/ivshmem-spec.html
+static void FindIvshmemDevices()
+{
+ std::string result;
+ DIR* devices = opendir("/sys/bus/pci/devices");
+ if (!devices)
+ fail("opendir(/sys/bus/pci/devices) failed");
+ void* regs = nullptr;
+ void* input = nullptr;
+ void* output = nullptr;
+ while (auto* dev = readdir(devices)) {
+ if (dev->d_name[0] == '.')
+ continue;
+ const std::string& vendor = ReadTextFile("/sys/bus/pci/devices/%s/vendor", dev->d_name);
+ const std::string& device = ReadTextFile("/sys/bus/pci/devices/%s/device", dev->d_name);
+ debug("PCI device %s: vendor=%s device=%s\n", dev->d_name, vendor.c_str(), device.c_str());
+ if (vendor != "0x1af4" || device != "0x1110")
+ continue;
+ char filename[1024];
+ snprintf(filename, sizeof(filename), "/sys/bus/pci/devices/%s/resource2", dev->d_name);
+ int res2 = open(filename, O_RDWR);
+ if (res2 == -1)
+ fail("failed to open ivshmem resource2");
+ struct stat statbuf;
+ if (fstat(res2, &statbuf))
+ fail("failed to fstat ivshmem resource2");
+ debug("ivshmem resource2 size %zu\n", static_cast<size_t>(statbuf.st_size));
+ // The only way to distinguish the 2 ivshmem regions is by size.
+ if (statbuf.st_size == static_cast<uint64>(rpc::Const::SnapshotDoorbellSize)) {
+ snprintf(filename, sizeof(filename), "/sys/bus/pci/devices/%s/resource0", dev->d_name);
+ int res0 = open(filename, O_RDWR);
+ if (res0 == -1)
+ fail("failed to open ivshmem resource0");
+ regs = mmap(nullptr, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED, res0, 0);
+ close(res0);
+ if (regs == MAP_FAILED)
+ fail("failed to mmap ivshmem resource0");
+ debug("mapped doorbell registers at %p\n", regs);
+ } else if (statbuf.st_size == static_cast<uint64>(rpc::Const::SnapshotShmemSize)) {
+ input = mmap(nullptr, static_cast<uint64>(rpc::Const::MaxInputSize),
+ PROT_READ, MAP_SHARED, res2, 0);
+ output = mmap(nullptr, static_cast<uint64>(rpc::Const::MaxOutputSize),
+ PROT_READ | PROT_WRITE, MAP_SHARED, res2,
+ static_cast<uint64>(rpc::Const::MaxInputSize));
+ if (input == MAP_FAILED || output == MAP_FAILED)
+ fail("failed to mmap ivshmem resource2");
+ debug("mapped shmem input at %p/%llu\n",
+ input, static_cast<uint64>(rpc::Const::MaxInputSize));
+ debug("mapped shmem output at %p/%llu\n",
+ output, static_cast<uint64>(rpc::Const::MaxOutputSize));
+ }
+ close(res2);
+ }
+ closedir(devices);
+ if (regs == nullptr || input == nullptr)
+ fail("cannot find ivshmem PCI devices");
+ ivs.doorbell = static_cast<uint32*>(regs) + 3;
+ ivs.hdr = static_cast<rpc::SnapshotHeaderT*>(output);
+ ivs.input = input;
+ output_data = reinterpret_cast<OutputData*>(static_cast<char*>(output) + sizeof(rpc::SnapshotHeaderT));
+ output_size = static_cast<uint64>(rpc::Const::MaxOutputSize) - sizeof(rpc::SnapshotHeaderT);
+}
+
+static void SnapshotSetup(char** argv, int argc)
+{
+ flag_snapshot = true;
+ // This allows us to see debug output during early setup.
+ // If debug is not actually enabled, it will be turned off in parse_handshake.
+ flag_debug = true;
+#if GOOS_linux
+ // In snapshot mode executor output is redirected to /dev/kmsg.
+ // This is required to turn off rate limiting of writes.
+ write_file("/proc/sys/kernel/printk_devkmsg", "on\n");
+#endif
+ FindIvshmemDevices();
+ // Wait for the host to write handshake_req into input memory.
+ while (ivs.hdr->state != rpc::SnapshotState::Handshake)
+ sleep_ms(10);
+ auto msg = flatbuffers::GetRoot<rpc::SnapshotHandshake>(ivs.input);
+ handshake_req req = {
+ .magic = kInMagic,
+ .use_cover_edges = msg->cover_edges(),
+ .is_kernel_64_bit = msg->kernel_64_bit(),
+ .flags = msg->env_flags(),
+ .pid = 0,
+ .sandbox_arg = static_cast<uint64>(msg->sandbox_arg()),
+ .syscall_timeout_ms = static_cast<uint64>(msg->syscall_timeout_ms()),
+ .program_timeout_ms = static_cast<uint64>(msg->program_timeout_ms()),
+ .slowdown_scale = static_cast<uint64>(msg->slowdown()),
+ };
+ parse_handshake(req);
+ for (const auto& feat : features) {
+ if (!(msg->features() & feat.id))
+ continue;
+ debug("setting up feature %s\n", rpc::EnumNameFeature(feat.id));
+ const char* reason = feat.setup();
+ if (reason)
+ failmsg("feature setup failed", "reason: %s", reason);
+ }
+}
+
+constexpr size_t kOutputPopulate = 256 << 10;
+constexpr size_t kInputPopulate = 64 << 10;
+constexpr size_t kGlobalsPopulate = 4 << 10;
+constexpr size_t kDataPopulate = 8 << 10;
+constexpr size_t kCoveragePopulate = 32 << 10;
+constexpr size_t kThreadsPopulate = 2;
+
+static void SnapshotSetState(rpc::SnapshotState state)
+{
+ debug("changing snapshot state %s -> %s\n",
+ rpc::EnumNameSnapshotState(ivs.hdr->state), rpc::EnumNameSnapshotState(state));
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ ivs.hdr->state = state;
+ // The register contains VM index shifted by 16 (the host part is VM index 1)
+ // + interrupt vector index (0 in our case).
+ *ivs.doorbell = 1 << 16;
+}
+
+// PopulateMemory prefaults anon memory (we want to avoid minor page faults as well).
+static void PopulateMemory(void* ptr, size_t size)
+{
+ ptr = (void*)(uintptr_t(ptr) & ~(getpagesize() - 1));
+ if (madvise(ptr, size, MADV_POPULATE_WRITE))
+ failmsg("populate madvise failed", "ptr=%p size=%zu", ptr, size);
+}
+
+// TouchMemory prefaults non-anon shared memory.
+static void TouchMemory(void* ptr, size_t size)
+{
+ size_t const kPageSize = getpagesize();
+ for (size_t i = 0; i < size; i += kPageSize)
+ (void)((volatile char*)ptr)[i];
+}
+
+#if SYZ_EXECUTOR_USES_FORK_SERVER
+static void SnapshotPrepareParent()
+{
+ TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
+ // Notify SnapshotStart that we finished prefaulting memory in the parent.
+ output_data->completed = 1;
+ // Wait for the request to come, so that we give it full time slice to execute.
+ // This process will start waiting for the child as soon as we return.
+ while (ivs.hdr->state != rpc::SnapshotState::Execute)
+ ;
+}
+#endif
+
+static void SnapshotStart()
+{
+ debug("SnapshotStart\n");
+ // Prefault as much memory as we can before the snapshot is taken.
+ // Also pre-create some threads and let them block.
+ // This is intended to make execution after each snapshot restore faster,
+ // as we won't need to do that duplicate work again and again.
+ flag_threaded = true;
+ for (size_t i = 0; i < kThreadsPopulate; i++) {
+ thread_t* th = &threads[i];
+ thread_create(th, i, flag_coverage);
+ if (flag_coverage)
+ PopulateMemory(th->cov.data, kCoveragePopulate);
+ }
+ TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
+ TouchMemory(ivs.input, kInputPopulate);
+ PopulateMemory(&flag_coverage, kGlobalsPopulate);
+ PopulateMemory((void*)SYZ_DATA_OFFSET, kDataPopulate);
+ sleep_ms(100); // let threads start and block
+ // Wait for the parent process to prefault as well.
+ while (!output_data->completed)
+ sleep_ms(1);
+ // Notify host that we are ready to be snapshotted.
+ SnapshotSetState(rpc::SnapshotState::Ready);
+ // Snapshot is restored here.
+ // First time we may loop here while the snapshot is taken,
+ // but afterwards we should be restored when the state is already Execute.
+ // Note: we don't use sleep in the loop because we may be snapshotted while in the sleep syscall.
+ // As a result, each execution after snapshot restore will be slower as it will need to finish
+ // the sleep and return from the syscall.
+ while (ivs.hdr->state == rpc::SnapshotState::Ready)
+ ;
+ if (ivs.hdr->state == rpc::SnapshotState::Snapshotted) {
+ // First time around, just acknowledge and wait for snapshot restart.
+ SnapshotSetState(rpc::SnapshotState::Executed);
+ for (;;)
+ sleep(1000);
+ }
+ // Resumed for program execution.
+ output_data->Reset();
+ auto msg = flatbuffers::GetRoot<rpc::SnapshotRequest>(ivs.input);
+ execute_req req = {
+ .magic = kInMagic,
+ .id = 0,
+ .exec_flags = static_cast<uint64>(msg->exec_flags()),
+ .all_call_signal = msg->all_call_signal(),
+ .all_extra_signal = msg->all_extra_signal(),
+ };
+ parse_execute(req);
+ output_data->num_calls.store(msg->num_calls(), std::memory_order_relaxed);
+ input_data = const_cast<uint8*>(msg->prog_data()->Data());
+}
+
+NORETURN static void SnapshotDone(bool failed)
+{
+ debug("SnapshotDone\n");
+ uint32 num_calls = output_data->num_calls.load(std::memory_order_relaxed);
+ auto data = finish_output(output_data, 0, 0, num_calls, 0, 0, failed ? kFailStatus : 0, nullptr);
+ ivs.hdr->output_offset = data.data() - reinterpret_cast<volatile uint8_t*>(ivs.hdr);
+ ivs.hdr->output_size = data.size();
+ SnapshotSetState(failed ? rpc::SnapshotState::Failed : rpc::SnapshotState::Executed);
+ // Wait to be restarted from the snapshot.
+ for (;;)
+ sleep(1000);
+}
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 98fb2f5da..bd8b023d9 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -3,6 +3,14 @@
namespace rpc;
+// Various consts shared between Go and C++ code.
+enum Const : uint64 {
+ MaxInputSize = 4198400, // 4<<20 + 4<<10
+ MaxOutputSize = 14680064, // 14<<20
+ SnapshotShmemSize = 33554432, // Must be power-of-2 and >=MaxInputSize+MaxOutputSize
+ SnapshotDoorbellSize = 4096, // 4<<10
+}
+
enum Feature : uint64 (bit_flags) {
Coverage,
Comparisons,
@@ -239,3 +247,48 @@ table ExecResultRaw {
table StateResultRaw {
data :[uint8];
}
+
+// SnapshotState is used for synchronization between host/target parts during snapshot execution.
+enum SnapshotState : uint64 {
+ // Initial 0 state.
+ Initial,
+ // Host wrote handshake request data and is ready to take snapshot.
+ Handshake,
+ // Target received handshake request and is ready to be snapshotted.
+ Ready,
+ // Host has taken snapshot.
+ Snapshotted,
+ // Host wrote request data and resumed the target from snapshot.
+ Execute,
+ // Target has finished executing a request and is ready to be reset.
+ Executed,
+ // Target has failed to execute a request.
+ Failed,
+}
+
+// SnapshotHeader is located at the beginning of the snapshot output shared memory region.
+table SnapshotHeader {
+ state :SnapshotState;
+ // Offset and size of the output data after program execution.
+ output_offset :uint32;
+ output_size :uint32;
+}
+
+table SnapshotHandshake {
+ cover_edges :bool;
+ kernel_64_bit :bool;
+ slowdown :int32;
+ syscall_timeout_ms :int32;
+ program_timeout_ms :int32;
+ features :Feature;
+ env_flags :ExecEnv;
+ sandbox_arg :int64;
+}
+
+table SnapshotRequest {
+ exec_flags :ExecFlag;
+ num_calls :int32;
+ all_call_signal :uint64;
+ all_extra_signal :bool;
+ prog_data :[uint8];
+}
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index aa8970ba9..a53d0075d 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -8,6 +8,36 @@ import (
flatbuffers "github.com/google/flatbuffers/go"
)
+type Const uint64
+
+const (
+ ConstSnapshotDoorbellSize Const = 4096
+ ConstMaxInputSize Const = 4198400
+ ConstMaxOutputSize Const = 14680064
+ ConstSnapshotShmemSize Const = 33554432
+)
+
+var EnumNamesConst = map[Const]string{
+ ConstSnapshotDoorbellSize: "SnapshotDoorbellSize",
+ ConstMaxInputSize: "MaxInputSize",
+ ConstMaxOutputSize: "MaxOutputSize",
+ ConstSnapshotShmemSize: "SnapshotShmemSize",
+}
+
+var EnumValuesConst = map[string]Const{
+ "SnapshotDoorbellSize": ConstSnapshotDoorbellSize,
+ "MaxInputSize": ConstMaxInputSize,
+ "MaxOutputSize": ConstMaxOutputSize,
+ "SnapshotShmemSize": ConstSnapshotShmemSize,
+}
+
+func (v Const) String() string {
+ if s, ok := EnumNamesConst[v]; ok {
+ return s
+ }
+ return "Const(" + strconv.FormatInt(int64(v), 10) + ")"
+}
+
type Feature uint64
const (
@@ -389,6 +419,45 @@ func (v CallFlag) String() string {
return "CallFlag(" + strconv.FormatInt(int64(v), 10) + ")"
}
+type SnapshotState uint64
+
+const (
+ SnapshotStateInitial SnapshotState = 0
+ SnapshotStateHandshake SnapshotState = 1
+ SnapshotStateReady SnapshotState = 2
+ SnapshotStateSnapshotted SnapshotState = 3
+ SnapshotStateExecute SnapshotState = 4
+ SnapshotStateExecuted SnapshotState = 5
+ SnapshotStateFailed SnapshotState = 6
+)
+
+var EnumNamesSnapshotState = map[SnapshotState]string{
+ SnapshotStateInitial: "Initial",
+ SnapshotStateHandshake: "Handshake",
+ SnapshotStateReady: "Ready",
+ SnapshotStateSnapshotted: "Snapshotted",
+ SnapshotStateExecute: "Execute",
+ SnapshotStateExecuted: "Executed",
+ SnapshotStateFailed: "Failed",
+}
+
+var EnumValuesSnapshotState = map[string]SnapshotState{
+ "Initial": SnapshotStateInitial,
+ "Handshake": SnapshotStateHandshake,
+ "Ready": SnapshotStateReady,
+ "Snapshotted": SnapshotStateSnapshotted,
+ "Execute": SnapshotStateExecute,
+ "Executed": SnapshotStateExecuted,
+ "Failed": SnapshotStateFailed,
+}
+
+func (v SnapshotState) String() string {
+ if s, ok := EnumNamesSnapshotState[v]; ok {
+ return s
+ }
+ return "SnapshotState(" + strconv.FormatInt(int64(v), 10) + ")"
+}
+
type ConnectRequestRawT struct {
Name string `json:"name"`
Arch string `json:"arch"`
@@ -3189,3 +3258,491 @@ func StateResultRawStartDataVector(builder *flatbuffers.Builder, numElems int) f
func StateResultRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
+
+type SnapshotHeaderT struct {
+ State SnapshotState `json:"state"`
+ OutputOffset uint32 `json:"output_offset"`
+ OutputSize uint32 `json:"output_size"`
+}
+
+func (t *SnapshotHeaderT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ SnapshotHeaderStart(builder)
+ SnapshotHeaderAddState(builder, t.State)
+ SnapshotHeaderAddOutputOffset(builder, t.OutputOffset)
+ SnapshotHeaderAddOutputSize(builder, t.OutputSize)
+ return SnapshotHeaderEnd(builder)
+}
+
+func (rcv *SnapshotHeader) UnPackTo(t *SnapshotHeaderT) {
+ t.State = rcv.State()
+ t.OutputOffset = rcv.OutputOffset()
+ t.OutputSize = rcv.OutputSize()
+}
+
+func (rcv *SnapshotHeader) UnPack() *SnapshotHeaderT {
+ if rcv == nil {
+ return nil
+ }
+ t := &SnapshotHeaderT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type SnapshotHeader struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSnapshotHeader(buf []byte, offset flatbuffers.UOffsetT) *SnapshotHeader {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SnapshotHeader{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsSnapshotHeader(buf []byte, offset flatbuffers.UOffsetT) *SnapshotHeader {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &SnapshotHeader{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *SnapshotHeader) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SnapshotHeader) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *SnapshotHeader) State() SnapshotState {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return SnapshotState(rcv._tab.GetUint64(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SnapshotHeader) MutateState(n SnapshotState) bool {
+ return rcv._tab.MutateUint64Slot(4, uint64(n))
+}
+
+func (rcv *SnapshotHeader) OutputOffset() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHeader) MutateOutputOffset(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(6, n)
+}
+
+func (rcv *SnapshotHeader) OutputSize() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHeader) MutateOutputSize(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(8, n)
+}
+
+func SnapshotHeaderStart(builder *flatbuffers.Builder) {
+ builder.StartObject(3)
+}
+func SnapshotHeaderAddState(builder *flatbuffers.Builder, state SnapshotState) {
+ builder.PrependUint64Slot(0, uint64(state), 0)
+}
+func SnapshotHeaderAddOutputOffset(builder *flatbuffers.Builder, outputOffset uint32) {
+ builder.PrependUint32Slot(1, outputOffset, 0)
+}
+func SnapshotHeaderAddOutputSize(builder *flatbuffers.Builder, outputSize uint32) {
+ builder.PrependUint32Slot(2, outputSize, 0)
+}
+func SnapshotHeaderEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
+
+type SnapshotHandshakeT struct {
+ CoverEdges bool `json:"cover_edges"`
+ Kernel64Bit bool `json:"kernel_64_bit"`
+ Slowdown int32 `json:"slowdown"`
+ SyscallTimeoutMs int32 `json:"syscall_timeout_ms"`
+ ProgramTimeoutMs int32 `json:"program_timeout_ms"`
+ Features Feature `json:"features"`
+ EnvFlags ExecEnv `json:"env_flags"`
+ SandboxArg int64 `json:"sandbox_arg"`
+}
+
+func (t *SnapshotHandshakeT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ SnapshotHandshakeStart(builder)
+ SnapshotHandshakeAddCoverEdges(builder, t.CoverEdges)
+ SnapshotHandshakeAddKernel64Bit(builder, t.Kernel64Bit)
+ SnapshotHandshakeAddSlowdown(builder, t.Slowdown)
+ SnapshotHandshakeAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs)
+ SnapshotHandshakeAddProgramTimeoutMs(builder, t.ProgramTimeoutMs)
+ SnapshotHandshakeAddFeatures(builder, t.Features)
+ SnapshotHandshakeAddEnvFlags(builder, t.EnvFlags)
+ SnapshotHandshakeAddSandboxArg(builder, t.SandboxArg)
+ return SnapshotHandshakeEnd(builder)
+}
+
+func (rcv *SnapshotHandshake) UnPackTo(t *SnapshotHandshakeT) {
+ t.CoverEdges = rcv.CoverEdges()
+ t.Kernel64Bit = rcv.Kernel64Bit()
+ t.Slowdown = rcv.Slowdown()
+ t.SyscallTimeoutMs = rcv.SyscallTimeoutMs()
+ t.ProgramTimeoutMs = rcv.ProgramTimeoutMs()
+ t.Features = rcv.Features()
+ t.EnvFlags = rcv.EnvFlags()
+ t.SandboxArg = rcv.SandboxArg()
+}
+
+func (rcv *SnapshotHandshake) UnPack() *SnapshotHandshakeT {
+ if rcv == nil {
+ return nil
+ }
+ t := &SnapshotHandshakeT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type SnapshotHandshake struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSnapshotHandshake(buf []byte, offset flatbuffers.UOffsetT) *SnapshotHandshake {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SnapshotHandshake{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsSnapshotHandshake(buf []byte, offset flatbuffers.UOffsetT) *SnapshotHandshake {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &SnapshotHandshake{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *SnapshotHandshake) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SnapshotHandshake) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *SnapshotHandshake) CoverEdges() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *SnapshotHandshake) MutateCoverEdges(n bool) bool {
+ return rcv._tab.MutateBoolSlot(4, n)
+}
+
+func (rcv *SnapshotHandshake) Kernel64Bit() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *SnapshotHandshake) MutateKernel64Bit(n bool) bool {
+ return rcv._tab.MutateBoolSlot(6, n)
+}
+
+func (rcv *SnapshotHandshake) Slowdown() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateSlowdown(n int32) bool {
+ return rcv._tab.MutateInt32Slot(8, n)
+}
+
+func (rcv *SnapshotHandshake) SyscallTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateSyscallTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(10, n)
+}
+
+func (rcv *SnapshotHandshake) ProgramTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateProgramTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(12, n)
+}
+
+func (rcv *SnapshotHandshake) Features() Feature {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateFeatures(n Feature) bool {
+ return rcv._tab.MutateUint64Slot(14, uint64(n))
+}
+
+func (rcv *SnapshotHandshake) EnvFlags() ExecEnv {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ if o != 0 {
+ return ExecEnv(rcv._tab.GetUint64(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateEnvFlags(n ExecEnv) bool {
+ return rcv._tab.MutateUint64Slot(16, uint64(n))
+}
+
+func (rcv *SnapshotHandshake) SandboxArg() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotHandshake) MutateSandboxArg(n int64) bool {
+ return rcv._tab.MutateInt64Slot(18, n)
+}
+
+func SnapshotHandshakeStart(builder *flatbuffers.Builder) {
+ builder.StartObject(8)
+}
+func SnapshotHandshakeAddCoverEdges(builder *flatbuffers.Builder, coverEdges bool) {
+ builder.PrependBoolSlot(0, coverEdges, false)
+}
+func SnapshotHandshakeAddKernel64Bit(builder *flatbuffers.Builder, kernel64Bit bool) {
+ builder.PrependBoolSlot(1, kernel64Bit, false)
+}
+func SnapshotHandshakeAddSlowdown(builder *flatbuffers.Builder, slowdown int32) {
+ builder.PrependInt32Slot(2, slowdown, 0)
+}
+func SnapshotHandshakeAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) {
+ builder.PrependInt32Slot(3, syscallTimeoutMs, 0)
+}
+func SnapshotHandshakeAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) {
+ builder.PrependInt32Slot(4, programTimeoutMs, 0)
+}
+func SnapshotHandshakeAddFeatures(builder *flatbuffers.Builder, features Feature) {
+ builder.PrependUint64Slot(5, uint64(features), 0)
+}
+func SnapshotHandshakeAddEnvFlags(builder *flatbuffers.Builder, envFlags ExecEnv) {
+ builder.PrependUint64Slot(6, uint64(envFlags), 0)
+}
+func SnapshotHandshakeAddSandboxArg(builder *flatbuffers.Builder, sandboxArg int64) {
+ builder.PrependInt64Slot(7, sandboxArg, 0)
+}
+func SnapshotHandshakeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
+
+type SnapshotRequestT struct {
+ ExecFlags ExecFlag `json:"exec_flags"`
+ NumCalls int32 `json:"num_calls"`
+ AllCallSignal uint64 `json:"all_call_signal"`
+ AllExtraSignal bool `json:"all_extra_signal"`
+ ProgData []byte `json:"prog_data"`
+}
+
+func (t *SnapshotRequestT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ progDataOffset := flatbuffers.UOffsetT(0)
+ if t.ProgData != nil {
+ progDataOffset = builder.CreateByteString(t.ProgData)
+ }
+ SnapshotRequestStart(builder)
+ SnapshotRequestAddExecFlags(builder, t.ExecFlags)
+ SnapshotRequestAddNumCalls(builder, t.NumCalls)
+ SnapshotRequestAddAllCallSignal(builder, t.AllCallSignal)
+ SnapshotRequestAddAllExtraSignal(builder, t.AllExtraSignal)
+ SnapshotRequestAddProgData(builder, progDataOffset)
+ return SnapshotRequestEnd(builder)
+}
+
+func (rcv *SnapshotRequest) UnPackTo(t *SnapshotRequestT) {
+ t.ExecFlags = rcv.ExecFlags()
+ t.NumCalls = rcv.NumCalls()
+ t.AllCallSignal = rcv.AllCallSignal()
+ t.AllExtraSignal = rcv.AllExtraSignal()
+ t.ProgData = rcv.ProgDataBytes()
+}
+
+func (rcv *SnapshotRequest) UnPack() *SnapshotRequestT {
+ if rcv == nil {
+ return nil
+ }
+ t := &SnapshotRequestT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type SnapshotRequest struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSnapshotRequest(buf []byte, offset flatbuffers.UOffsetT) *SnapshotRequest {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SnapshotRequest{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsSnapshotRequest(buf []byte, offset flatbuffers.UOffsetT) *SnapshotRequest {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &SnapshotRequest{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *SnapshotRequest) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SnapshotRequest) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *SnapshotRequest) ExecFlags() ExecFlag {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return ExecFlag(rcv._tab.GetUint64(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SnapshotRequest) MutateExecFlags(n ExecFlag) bool {
+ return rcv._tab.MutateUint64Slot(4, uint64(n))
+}
+
+func (rcv *SnapshotRequest) NumCalls() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotRequest) MutateNumCalls(n int32) bool {
+ return rcv._tab.MutateInt32Slot(6, n)
+}
+
+func (rcv *SnapshotRequest) AllCallSignal() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *SnapshotRequest) MutateAllCallSignal(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(8, n)
+}
+
+func (rcv *SnapshotRequest) AllExtraSignal() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *SnapshotRequest) MutateAllExtraSignal(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func (rcv *SnapshotRequest) ProgData(j int) byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
+ }
+ return 0
+}
+
+func (rcv *SnapshotRequest) ProgDataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *SnapshotRequest) ProgDataBytes() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func (rcv *SnapshotRequest) MutateProgData(j int, n byte) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
+ }
+ return false
+}
+
+func SnapshotRequestStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func SnapshotRequestAddExecFlags(builder *flatbuffers.Builder, execFlags ExecFlag) {
+ builder.PrependUint64Slot(0, uint64(execFlags), 0)
+}
+func SnapshotRequestAddNumCalls(builder *flatbuffers.Builder, numCalls int32) {
+ builder.PrependInt32Slot(1, numCalls, 0)
+}
+func SnapshotRequestAddAllCallSignal(builder *flatbuffers.Builder, allCallSignal uint64) {
+ builder.PrependUint64Slot(2, allCallSignal, 0)
+}
+func SnapshotRequestAddAllExtraSignal(builder *flatbuffers.Builder, allExtraSignal bool) {
+ builder.PrependBoolSlot(3, allExtraSignal, false)
+}
+func SnapshotRequestAddProgData(builder *flatbuffers.Builder, progData flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(progData), 0)
+}
+func SnapshotRequestStartProgDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(1, numElems, 1)
+}
+func SnapshotRequestEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index 8be575885..88defed83 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -91,6 +91,47 @@ struct StateResultRaw;
struct StateResultRawBuilder;
struct StateResultRawT;
+struct SnapshotHeader;
+struct SnapshotHeaderBuilder;
+struct SnapshotHeaderT;
+
+struct SnapshotHandshake;
+struct SnapshotHandshakeBuilder;
+struct SnapshotHandshakeT;
+
+struct SnapshotRequest;
+struct SnapshotRequestBuilder;
+struct SnapshotRequestT;
+
+enum class Const : uint64_t {
+ SnapshotDoorbellSize = 4096ULL,
+ MaxInputSize = 4198400ULL,
+ MaxOutputSize = 14680064ULL,
+ SnapshotShmemSize = 33554432ULL,
+ MIN = SnapshotDoorbellSize,
+ MAX = SnapshotShmemSize
+};
+
+inline const Const (&EnumValuesConst())[4] {
+ static const Const values[] = {
+ Const::SnapshotDoorbellSize,
+ Const::MaxInputSize,
+ Const::MaxOutputSize,
+ Const::SnapshotShmemSize
+ };
+ return values;
+}
+
+inline const char *EnumNameConst(Const e) {
+ switch (e) {
+ case Const::SnapshotDoorbellSize: return "SnapshotDoorbellSize";
+ case Const::MaxInputSize: return "MaxInputSize";
+ case Const::MaxOutputSize: return "MaxOutputSize";
+ case Const::SnapshotShmemSize: return "SnapshotShmemSize";
+ default: return "";
+ }
+}
+
enum class Feature : uint64_t {
Coverage = 1ULL,
Comparisons = 2ULL,
@@ -644,6 +685,51 @@ inline const char *EnumNameCallFlag(CallFlag e) {
return EnumNamesCallFlag()[index];
}
+enum class SnapshotState : uint64_t {
+ Initial = 0,
+ Handshake = 1ULL,
+ Ready = 2ULL,
+ Snapshotted = 3ULL,
+ Execute = 4ULL,
+ Executed = 5ULL,
+ Failed = 6ULL,
+ MIN = Initial,
+ MAX = Failed
+};
+
+inline const SnapshotState (&EnumValuesSnapshotState())[7] {
+ static const SnapshotState values[] = {
+ SnapshotState::Initial,
+ SnapshotState::Handshake,
+ SnapshotState::Ready,
+ SnapshotState::Snapshotted,
+ SnapshotState::Execute,
+ SnapshotState::Executed,
+ SnapshotState::Failed
+ };
+ return values;
+}
+
+inline const char * const *EnumNamesSnapshotState() {
+ static const char * const names[8] = {
+ "Initial",
+ "Handshake",
+ "Ready",
+ "Snapshotted",
+ "Execute",
+ "Executed",
+ "Failed",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameSnapshotState(SnapshotState e) {
+ if (flatbuffers::IsOutRange(e, SnapshotState::Initial, SnapshotState::Failed)) return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesSnapshotState()[index];
+}
+
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) ExecOptsRaw FLATBUFFERS_FINAL_CLASS {
private:
uint64_t env_flags_;
@@ -2436,6 +2522,323 @@ inline flatbuffers::Offset<StateResultRaw> CreateStateResultRawDirect(
flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct SnapshotHeaderT : public flatbuffers::NativeTable {
+ typedef SnapshotHeader TableType;
+ rpc::SnapshotState state = rpc::SnapshotState::Initial;
+ uint32_t output_offset = 0;
+ uint32_t output_size = 0;
+};
+
+struct SnapshotHeader FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SnapshotHeaderT NativeTableType;
+ typedef SnapshotHeaderBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_STATE = 4,
+ VT_OUTPUT_OFFSET = 6,
+ VT_OUTPUT_SIZE = 8
+ };
+ rpc::SnapshotState state() const {
+ return static_cast<rpc::SnapshotState>(GetField<uint64_t>(VT_STATE, 0));
+ }
+ uint32_t output_offset() const {
+ return GetField<uint32_t>(VT_OUTPUT_OFFSET, 0);
+ }
+ uint32_t output_size() const {
+ return GetField<uint32_t>(VT_OUTPUT_SIZE, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint64_t>(verifier, VT_STATE, 8) &&
+ VerifyField<uint32_t>(verifier, VT_OUTPUT_OFFSET, 4) &&
+ VerifyField<uint32_t>(verifier, VT_OUTPUT_SIZE, 4) &&
+ verifier.EndTable();
+ }
+ SnapshotHeaderT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SnapshotHeaderT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SnapshotHeader> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHeaderT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SnapshotHeaderBuilder {
+ typedef SnapshotHeader Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_state(rpc::SnapshotState state) {
+ fbb_.AddElement<uint64_t>(SnapshotHeader::VT_STATE, static_cast<uint64_t>(state), 0);
+ }
+ void add_output_offset(uint32_t output_offset) {
+ fbb_.AddElement<uint32_t>(SnapshotHeader::VT_OUTPUT_OFFSET, output_offset, 0);
+ }
+ void add_output_size(uint32_t output_size) {
+ fbb_.AddElement<uint32_t>(SnapshotHeader::VT_OUTPUT_SIZE, output_size, 0);
+ }
+ explicit SnapshotHeaderBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SnapshotHeader> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SnapshotHeader>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SnapshotHeader> CreateSnapshotHeader(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ rpc::SnapshotState state = rpc::SnapshotState::Initial,
+ uint32_t output_offset = 0,
+ uint32_t output_size = 0) {
+ SnapshotHeaderBuilder builder_(_fbb);
+ builder_.add_state(state);
+ builder_.add_output_size(output_size);
+ builder_.add_output_offset(output_offset);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SnapshotHeader> CreateSnapshotHeader(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHeaderT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SnapshotHandshakeT : public flatbuffers::NativeTable {
+ typedef SnapshotHandshake TableType;
+ bool cover_edges = false;
+ bool kernel_64_bit = false;
+ int32_t slowdown = 0;
+ int32_t syscall_timeout_ms = 0;
+ int32_t program_timeout_ms = 0;
+ rpc::Feature features = static_cast<rpc::Feature>(0);
+ rpc::ExecEnv env_flags = static_cast<rpc::ExecEnv>(0);
+ int64_t sandbox_arg = 0;
+};
+
+struct SnapshotHandshake FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SnapshotHandshakeT NativeTableType;
+ typedef SnapshotHandshakeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_COVER_EDGES = 4,
+ VT_KERNEL_64_BIT = 6,
+ VT_SLOWDOWN = 8,
+ VT_SYSCALL_TIMEOUT_MS = 10,
+ VT_PROGRAM_TIMEOUT_MS = 12,
+ VT_FEATURES = 14,
+ VT_ENV_FLAGS = 16,
+ VT_SANDBOX_ARG = 18
+ };
+ bool cover_edges() const {
+ return GetField<uint8_t>(VT_COVER_EDGES, 0) != 0;
+ }
+ bool kernel_64_bit() const {
+ return GetField<uint8_t>(VT_KERNEL_64_BIT, 0) != 0;
+ }
+ int32_t slowdown() const {
+ return GetField<int32_t>(VT_SLOWDOWN, 0);
+ }
+ int32_t syscall_timeout_ms() const {
+ return GetField<int32_t>(VT_SYSCALL_TIMEOUT_MS, 0);
+ }
+ int32_t program_timeout_ms() const {
+ return GetField<int32_t>(VT_PROGRAM_TIMEOUT_MS, 0);
+ }
+ rpc::Feature features() const {
+ return static_cast<rpc::Feature>(GetField<uint64_t>(VT_FEATURES, 0));
+ }
+ rpc::ExecEnv env_flags() const {
+ return static_cast<rpc::ExecEnv>(GetField<uint64_t>(VT_ENV_FLAGS, 0));
+ }
+ int64_t sandbox_arg() const {
+ return GetField<int64_t>(VT_SANDBOX_ARG, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint8_t>(verifier, VT_COVER_EDGES, 1) &&
+ VerifyField<uint8_t>(verifier, VT_KERNEL_64_BIT, 1) &&
+ VerifyField<int32_t>(verifier, VT_SLOWDOWN, 4) &&
+ VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) &&
+ VerifyField<int32_t>(verifier, VT_PROGRAM_TIMEOUT_MS, 4) &&
+ VerifyField<uint64_t>(verifier, VT_FEATURES, 8) &&
+ VerifyField<uint64_t>(verifier, VT_ENV_FLAGS, 8) &&
+ VerifyField<int64_t>(verifier, VT_SANDBOX_ARG, 8) &&
+ verifier.EndTable();
+ }
+ SnapshotHandshakeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SnapshotHandshakeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SnapshotHandshake> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHandshakeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SnapshotHandshakeBuilder {
+ typedef SnapshotHandshake Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_cover_edges(bool cover_edges) {
+ fbb_.AddElement<uint8_t>(SnapshotHandshake::VT_COVER_EDGES, static_cast<uint8_t>(cover_edges), 0);
+ }
+ void add_kernel_64_bit(bool kernel_64_bit) {
+ fbb_.AddElement<uint8_t>(SnapshotHandshake::VT_KERNEL_64_BIT, static_cast<uint8_t>(kernel_64_bit), 0);
+ }
+ void add_slowdown(int32_t slowdown) {
+ fbb_.AddElement<int32_t>(SnapshotHandshake::VT_SLOWDOWN, slowdown, 0);
+ }
+ void add_syscall_timeout_ms(int32_t syscall_timeout_ms) {
+ fbb_.AddElement<int32_t>(SnapshotHandshake::VT_SYSCALL_TIMEOUT_MS, syscall_timeout_ms, 0);
+ }
+ void add_program_timeout_ms(int32_t program_timeout_ms) {
+ fbb_.AddElement<int32_t>(SnapshotHandshake::VT_PROGRAM_TIMEOUT_MS, program_timeout_ms, 0);
+ }
+ void add_features(rpc::Feature features) {
+ fbb_.AddElement<uint64_t>(SnapshotHandshake::VT_FEATURES, static_cast<uint64_t>(features), 0);
+ }
+ void add_env_flags(rpc::ExecEnv env_flags) {
+ fbb_.AddElement<uint64_t>(SnapshotHandshake::VT_ENV_FLAGS, static_cast<uint64_t>(env_flags), 0);
+ }
+ void add_sandbox_arg(int64_t sandbox_arg) {
+ fbb_.AddElement<int64_t>(SnapshotHandshake::VT_SANDBOX_ARG, sandbox_arg, 0);
+ }
+ explicit SnapshotHandshakeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SnapshotHandshake> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SnapshotHandshake>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SnapshotHandshake> CreateSnapshotHandshake(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ bool cover_edges = false,
+ bool kernel_64_bit = false,
+ int32_t slowdown = 0,
+ int32_t syscall_timeout_ms = 0,
+ int32_t program_timeout_ms = 0,
+ rpc::Feature features = static_cast<rpc::Feature>(0),
+ rpc::ExecEnv env_flags = static_cast<rpc::ExecEnv>(0),
+ int64_t sandbox_arg = 0) {
+ SnapshotHandshakeBuilder builder_(_fbb);
+ builder_.add_sandbox_arg(sandbox_arg);
+ builder_.add_env_flags(env_flags);
+ builder_.add_features(features);
+ builder_.add_program_timeout_ms(program_timeout_ms);
+ builder_.add_syscall_timeout_ms(syscall_timeout_ms);
+ builder_.add_slowdown(slowdown);
+ builder_.add_kernel_64_bit(kernel_64_bit);
+ builder_.add_cover_edges(cover_edges);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SnapshotHandshake> CreateSnapshotHandshake(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHandshakeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SnapshotRequestT : public flatbuffers::NativeTable {
+ typedef SnapshotRequest TableType;
+ rpc::ExecFlag exec_flags = static_cast<rpc::ExecFlag>(0);
+ int32_t num_calls = 0;
+ uint64_t all_call_signal = 0;
+ bool all_extra_signal = false;
+ std::vector<uint8_t> prog_data{};
+};
+
+struct SnapshotRequest FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SnapshotRequestT NativeTableType;
+ typedef SnapshotRequestBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_EXEC_FLAGS = 4,
+ VT_NUM_CALLS = 6,
+ VT_ALL_CALL_SIGNAL = 8,
+ VT_ALL_EXTRA_SIGNAL = 10,
+ VT_PROG_DATA = 12
+ };
+ rpc::ExecFlag exec_flags() const {
+ return static_cast<rpc::ExecFlag>(GetField<uint64_t>(VT_EXEC_FLAGS, 0));
+ }
+ int32_t num_calls() const {
+ return GetField<int32_t>(VT_NUM_CALLS, 0);
+ }
+ uint64_t all_call_signal() const {
+ return GetField<uint64_t>(VT_ALL_CALL_SIGNAL, 0);
+ }
+ bool all_extra_signal() const {
+ return GetField<uint8_t>(VT_ALL_EXTRA_SIGNAL, 0) != 0;
+ }
+ const flatbuffers::Vector<uint8_t> *prog_data() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_PROG_DATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint64_t>(verifier, VT_EXEC_FLAGS, 8) &&
+ VerifyField<int32_t>(verifier, VT_NUM_CALLS, 4) &&
+ VerifyField<uint64_t>(verifier, VT_ALL_CALL_SIGNAL, 8) &&
+ VerifyField<uint8_t>(verifier, VT_ALL_EXTRA_SIGNAL, 1) &&
+ VerifyOffset(verifier, VT_PROG_DATA) &&
+ verifier.VerifyVector(prog_data()) &&
+ verifier.EndTable();
+ }
+ SnapshotRequestT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SnapshotRequestT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SnapshotRequest> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotRequestT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SnapshotRequestBuilder {
+ typedef SnapshotRequest Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_exec_flags(rpc::ExecFlag exec_flags) {
+ fbb_.AddElement<uint64_t>(SnapshotRequest::VT_EXEC_FLAGS, static_cast<uint64_t>(exec_flags), 0);
+ }
+ void add_num_calls(int32_t num_calls) {
+ fbb_.AddElement<int32_t>(SnapshotRequest::VT_NUM_CALLS, num_calls, 0);
+ }
+ void add_all_call_signal(uint64_t all_call_signal) {
+ fbb_.AddElement<uint64_t>(SnapshotRequest::VT_ALL_CALL_SIGNAL, all_call_signal, 0);
+ }
+ void add_all_extra_signal(bool all_extra_signal) {
+ fbb_.AddElement<uint8_t>(SnapshotRequest::VT_ALL_EXTRA_SIGNAL, static_cast<uint8_t>(all_extra_signal), 0);
+ }
+ void add_prog_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data) {
+ fbb_.AddOffset(SnapshotRequest::VT_PROG_DATA, prog_data);
+ }
+ explicit SnapshotRequestBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SnapshotRequest> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SnapshotRequest>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SnapshotRequest> CreateSnapshotRequest(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ rpc::ExecFlag exec_flags = static_cast<rpc::ExecFlag>(0),
+ int32_t num_calls = 0,
+ uint64_t all_call_signal = 0,
+ bool all_extra_signal = false,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data = 0) {
+ SnapshotRequestBuilder builder_(_fbb);
+ builder_.add_all_call_signal(all_call_signal);
+ builder_.add_exec_flags(exec_flags);
+ builder_.add_prog_data(prog_data);
+ builder_.add_num_calls(num_calls);
+ builder_.add_all_extra_signal(all_extra_signal);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SnapshotRequest> CreateSnapshotRequestDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ rpc::ExecFlag exec_flags = static_cast<rpc::ExecFlag>(0),
+ int32_t num_calls = 0,
+ uint64_t all_call_signal = 0,
+ bool all_extra_signal = false,
+ const std::vector<uint8_t> *prog_data = nullptr) {
+ auto prog_data__ = prog_data ? _fbb.CreateVector<uint8_t>(*prog_data) : 0;
+ return rpc::CreateSnapshotRequest(
+ _fbb,
+ exec_flags,
+ num_calls,
+ all_call_signal,
+ all_extra_signal,
+ prog_data__);
+}
+
+flatbuffers::Offset<SnapshotRequest> CreateSnapshotRequest(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotRequestT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
inline ConnectRequestRawT *ConnectRequestRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = std::unique_ptr<ConnectRequestRawT>(new ConnectRequestRawT());
UnPackTo(_o.get(), _resolver);
@@ -3099,6 +3502,123 @@ inline flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::Fla
_data);
}
+inline SnapshotHeaderT *SnapshotHeader::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<SnapshotHeaderT>(new SnapshotHeaderT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SnapshotHeader::UnPackTo(SnapshotHeaderT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = state(); _o->state = _e; }
+ { auto _e = output_offset(); _o->output_offset = _e; }
+ { auto _e = output_size(); _o->output_size = _e; }
+}
+
+inline flatbuffers::Offset<SnapshotHeader> SnapshotHeader::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHeaderT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSnapshotHeader(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SnapshotHeader> CreateSnapshotHeader(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHeaderT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SnapshotHeaderT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _state = _o->state;
+ auto _output_offset = _o->output_offset;
+ auto _output_size = _o->output_size;
+ return rpc::CreateSnapshotHeader(
+ _fbb,
+ _state,
+ _output_offset,
+ _output_size);
+}
+
+inline SnapshotHandshakeT *SnapshotHandshake::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<SnapshotHandshakeT>(new SnapshotHandshakeT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SnapshotHandshake::UnPackTo(SnapshotHandshakeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = cover_edges(); _o->cover_edges = _e; }
+ { auto _e = kernel_64_bit(); _o->kernel_64_bit = _e; }
+ { auto _e = slowdown(); _o->slowdown = _e; }
+ { auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; }
+ { auto _e = program_timeout_ms(); _o->program_timeout_ms = _e; }
+ { auto _e = features(); _o->features = _e; }
+ { auto _e = env_flags(); _o->env_flags = _e; }
+ { auto _e = sandbox_arg(); _o->sandbox_arg = _e; }
+}
+
+inline flatbuffers::Offset<SnapshotHandshake> SnapshotHandshake::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHandshakeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSnapshotHandshake(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SnapshotHandshake> CreateSnapshotHandshake(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotHandshakeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SnapshotHandshakeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _cover_edges = _o->cover_edges;
+ auto _kernel_64_bit = _o->kernel_64_bit;
+ auto _slowdown = _o->slowdown;
+ auto _syscall_timeout_ms = _o->syscall_timeout_ms;
+ auto _program_timeout_ms = _o->program_timeout_ms;
+ auto _features = _o->features;
+ auto _env_flags = _o->env_flags;
+ auto _sandbox_arg = _o->sandbox_arg;
+ return rpc::CreateSnapshotHandshake(
+ _fbb,
+ _cover_edges,
+ _kernel_64_bit,
+ _slowdown,
+ _syscall_timeout_ms,
+ _program_timeout_ms,
+ _features,
+ _env_flags,
+ _sandbox_arg);
+}
+
+inline SnapshotRequestT *SnapshotRequest::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<SnapshotRequestT>(new SnapshotRequestT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SnapshotRequest::UnPackTo(SnapshotRequestT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = exec_flags(); _o->exec_flags = _e; }
+ { auto _e = num_calls(); _o->num_calls = _e; }
+ { auto _e = all_call_signal(); _o->all_call_signal = _e; }
+ { auto _e = all_extra_signal(); _o->all_extra_signal = _e; }
+ { auto _e = prog_data(); if (_e) { _o->prog_data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->prog_data.begin()); } }
+}
+
+inline flatbuffers::Offset<SnapshotRequest> SnapshotRequest::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotRequestT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSnapshotRequest(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SnapshotRequest> CreateSnapshotRequest(flatbuffers::FlatBufferBuilder &_fbb, const SnapshotRequestT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SnapshotRequestT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _exec_flags = _o->exec_flags;
+ auto _num_calls = _o->num_calls;
+ auto _all_call_signal = _o->all_call_signal;
+ auto _all_extra_signal = _o->all_extra_signal;
+ auto _prog_data = _o->prog_data.size() ? _fbb.CreateVector(_o->prog_data) : 0;
+ return rpc::CreateSnapshotRequest(
+ _fbb,
+ _exec_flags,
+ _num_calls,
+ _all_call_signal,
+ _all_extra_signal,
+ _prog_data);
+}
+
inline bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, HostMessagesRaw type) {
switch (type) {
case HostMessagesRaw::NONE: {
diff --git a/pkg/flatrpc/helpers.go b/pkg/flatrpc/helpers.go
index 4c15bb836..697f9eefa 100644
--- a/pkg/flatrpc/helpers.go
+++ b/pkg/flatrpc/helpers.go
@@ -6,7 +6,9 @@ package flatrpc
import (
"fmt"
"slices"
+ "sync/atomic"
"syscall"
+ "unsafe"
)
const AllFeatures = ^Feature(0)
@@ -101,3 +103,11 @@ func FlagsToSandbox(flags ExecEnv) string {
}
panic("no sandbox flags present")
}
+
+func (hdr *SnapshotHeaderT) UpdateState(state SnapshotState) {
+ atomic.StoreUint64((*uint64)(unsafe.Pointer(&hdr.State)), uint64(state))
+}
+
+func (hdr *SnapshotHeaderT) LoadState() SnapshotState {
+ return SnapshotState(atomic.LoadUint64((*uint64)(unsafe.Pointer(&hdr.State))))
+}
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 81e997b09..216f00a6a 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -79,6 +79,10 @@ func Logf(v int, msg string, args ...interface{}) {
writeMessage(v, "", msg, args...)
}
+func Error(err error) {
+ Errorf("%v", err)
+}
+
func Errorf(msg string, args ...interface{}) {
writeMessage(0, "ERROR", msg, args...)
}
diff --git a/pkg/mgrconfig/load.go b/pkg/mgrconfig/load.go
index eb20b6b68..6f92a232e 100644
--- a/pkg/mgrconfig/load.go
+++ b/pkg/mgrconfig/load.go
@@ -130,6 +130,9 @@ func Complete(cfg *Config) error {
); err != nil {
return err
}
+ if cfg.Snapshot && cfg.Reproduce {
+ return fmt.Errorf("reproduction is not (yet) supported in snapshot mode")
+ }
cfg.Workdir = osutil.Abs(cfg.Workdir)
if cfg.WorkdirTemplate != "" {
cfg.WorkdirTemplate = osutil.Abs(cfg.WorkdirTemplate)
@@ -159,6 +162,29 @@ func Complete(cfg *Config) error {
}
cfg.CompleteKernelDirs()
+	if err := cfg.completeServices(); err != nil {
+		return err
+	}
+
+ if cfg.FuzzingVMs < 0 {
+ return fmt.Errorf("fuzzing_vms cannot be less than 0")
+ }
+
+ var err error
+ cfg.Syscalls, err = ParseEnabledSyscalls(cfg.Target, cfg.EnabledSyscalls, cfg.DisabledSyscalls)
+ if err != nil {
+ return err
+ }
+ cfg.NoMutateCalls, err = ParseNoMutateSyscalls(cfg.Target, cfg.NoMutateSyscalls)
+ if err != nil {
+ return err
+ }
+ cfg.initTimeouts()
+ cfg.VMLess = cfg.Type == "none"
+ return nil
+}
+
+func (cfg *Config) completeServices() error {
if cfg.HubClient != "" {
if err := checkNonEmpty(
cfg.Name, "name",
@@ -179,30 +205,14 @@ func Complete(cfg *Config) error {
return err
}
}
- if cfg.FuzzingVMs < 0 {
- return fmt.Errorf("fuzzing_vms cannot be less than 0")
- }
-
- var err error
- cfg.Syscalls, err = ParseEnabledSyscalls(cfg.Target, cfg.EnabledSyscalls, cfg.DisabledSyscalls)
- if err != nil {
- return err
- }
- cfg.NoMutateCalls, err = ParseNoMutateSyscalls(cfg.Target, cfg.NoMutateSyscalls)
- if err != nil {
- return err
- }
if !cfg.AssetStorage.IsEmpty() {
if cfg.DashboardClient == "" {
return fmt.Errorf("asset storage also requires dashboard client")
}
- err = cfg.AssetStorage.Validate()
- if err != nil {
+ if err := cfg.AssetStorage.Validate(); err != nil {
return err
}
}
- cfg.initTimeouts()
- cfg.VMLess = cfg.Type == "none"
return nil
}
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index 8e6f813bb..bb48c391d 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -147,9 +147,9 @@ func newImpl(ctx context.Context, cfg *Config, mgr Manager) (*Server, error) {
runnerStats: &runnerStats{
statExecRetries: stat.New("exec retries",
"Number of times a test program was restarted because the first run failed",
- stats.Rate{}, stats.Graph("executor")),
- statExecutorRestarts: stats.Create("executor restarts",
- "Number of times executor process was restarted", stats.Rate{}, stats.Graph("executor")),
+ stat.Rate{}, stat.Graph("executor")),
+ statExecutorRestarts: stat.New("executor restarts",
+ "Number of times executor process was restarted", stat.Rate{}, stat.Graph("executor")),
statExecBufferTooSmall: queue.StatExecBufferTooSmall,
statExecs: queue.StatExecs,
statNoExecRequests: queue.StatNoExecRequests,
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 2360e4f53..e8af56520 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -84,6 +84,7 @@ type Manager struct {
corpusPreload chan []fuzzer.Candidate
firstConnect atomic.Int64 // unix time, or 0 if not connected
crashTypes map[string]bool
+ loopStop func()
enabledFeatures flatrpc.Feature
checkDone atomic.Bool
fresh bool
@@ -98,6 +99,7 @@ type Manager struct {
mu sync.Mutex
fuzzer atomic.Pointer[fuzzer.Fuzzer]
+ source queue.Source
phase int
targetEnabledSyscalls map[*prog.Syscall]bool
@@ -310,12 +312,19 @@ func RunManager(mode Mode, cfg *mgrconfig.Config) {
<-vm.Shutdown
return
}
- ctx := vm.ShutdownCtx()
+ ctx, cancel := context.WithCancel(vm.ShutdownCtx())
+ mgr.loopStop = cancel
mgr.pool = vm.NewDispatcher(mgr.vmPool, mgr.fuzzerInstance)
mgr.reproMgr = newReproManager(mgr, mgr.vmPool.Count()-mgr.cfg.FuzzingVMs, mgr.cfg.DashboardOnlyRepro)
go mgr.processFuzzingResults(ctx)
go mgr.reproMgr.Loop(ctx)
mgr.pool.Loop(ctx)
+ if cfg.Snapshot {
+ log.Logf(0, "starting VMs for snapshot mode")
+ mgr.serv.Close()
+ mgr.serv = nil
+ mgr.snapshotLoop()
+ }
}
// Exit successfully in special operation modes.
@@ -1370,7 +1379,16 @@ func (mgr *Manager) MachineChecked(features flatrpc.Feature, enabledSyscalls map
go mgr.dashboardReproTasks()
}
}
- return queue.DefaultOpts(fuzzerObj, opts)
+ source := queue.DefaultOpts(fuzzerObj, opts)
+ if mgr.cfg.Snapshot {
+ log.Logf(0, "stopping VMs for snapshot mode")
+ mgr.source = source
+ mgr.loopStop()
+ return queue.Callback(func() *queue.Request {
+ return nil
+ })
+ }
+ return source
} else if mgr.mode == ModeCorpusRun {
ctx := &corpusRunner{
candidates: corpus,
@@ -1430,6 +1448,9 @@ func (cr *corpusRunner) Next() *queue.Request {
func (mgr *Manager) defaultExecOpts() flatrpc.ExecOpts {
env := csource.FeaturesToFlags(mgr.enabledFeatures, nil)
+ if *flagDebug {
+ env |= flatrpc.ExecEnvDebug
+ }
if mgr.cfg.Experimental.ResetAccState {
env |= flatrpc.ExecEnvResetState
}
@@ -1470,7 +1491,7 @@ func (mgr *Manager) MaxSignal() signal.Signal {
func (mgr *Manager) fuzzerLoop(fuzzer *fuzzer.Fuzzer) {
for ; ; time.Sleep(time.Second / 2) {
- if mgr.cfg.Cover {
+ if mgr.cfg.Cover && !mgr.cfg.Snapshot {
// Distribute new max signal over all instances.
newSignal := fuzzer.Cover.GrabSignalDelta()
log.Logf(3, "distributing %d new signal", len(newSignal))
@@ -1486,7 +1507,7 @@ func (mgr *Manager) fuzzerLoop(fuzzer *fuzzer.Fuzzer) {
}
mgr.mu.Lock()
if mgr.phase == phaseLoadedCorpus {
- if mgr.enabledFeatures&flatrpc.FeatureLeak != 0 {
+ if !mgr.cfg.Snapshot && mgr.enabledFeatures&flatrpc.FeatureLeak != 0 {
mgr.serv.TriagedCorpus()
}
if mgr.cfg.HubClient != "" {
diff --git a/syz-manager/snapshot.go b/syz-manager/snapshot.go
new file mode 100644
index 000000000..8bd27a1c6
--- /dev/null
+++ b/syz-manager/snapshot.go
@@ -0,0 +1,175 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/google/flatbuffers/go"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/vm"
+)
+
+func (mgr *Manager) snapshotLoop() { // starts one fuzzing goroutine per VM and blocks forever
+	queue.StatNumFuzzing.Add(mgr.vmPool.Count())
+	for index := 0; index < mgr.vmPool.Count(); index++ {
+		index := index // capture a per-iteration copy for the goroutine below
+		go func() {
+			for {
+				log.Error(mgr.snapshotVM(index)) // snapshotVM returns only on error; log it and restart the VM
+			}
+		}()
+	}
+	select {} // all work happens in the per-VM goroutines
+}
+
+func (mgr *Manager) snapshotVM(index int) error { // boots VM index and executes fuzzer requests in it until the first failure
+	inst, err := mgr.vmPool.Create(index)
+	if err != nil {
+		return err
+	}
+	defer inst.Close()
+	executor, err := inst.Copy(mgr.cfg.ExecutorBin)
+	if err != nil {
+		return err
+	}
+	// All network connections (including ssh) will break once we start restoring snapshots.
+	// So we start a background process and log to /dev/kmsg.
+	cmd := fmt.Sprintf("nohup %v exec snapshot 1>/dev/null 2>/dev/kmsg </dev/null &", executor)
+	if _, _, err := inst.Run(time.Hour, mgr.reporter, cmd); err != nil {
+		return err
+	}
+
+	builder := flatbuffers.NewBuilder(0)
+	var envFlags flatrpc.ExecEnv
+	for first := true; ; first = false {
+		// Note: execs are counted in snapshotRun; counting here as well would double-count them.
+		req := mgr.source.Next()
+		if first {
+			envFlags = req.ExecOpts.EnvFlags
+			if err := mgr.snapshotSetup(inst, builder, envFlags); err != nil {
+				req.Done(&queue.Result{Status: queue.Crashed})
+				return err
+			}
+		}
+		if envFlags != req.ExecOpts.EnvFlags {
+			panic(fmt.Sprintf("request env flags has changed: 0x%x -> 0x%x",
+				envFlags, req.ExecOpts.EnvFlags))
+		}
+
+		res, output, err := mgr.snapshotRun(inst, builder, req)
+		if err != nil {
+			req.Done(&queue.Result{Status: queue.Crashed})
+			return err
+		}
+
+		if mgr.reporter.ContainsCrash(output) {
+			res.Status = queue.Crashed
+			rep := mgr.reporter.Parse(output)
+			buf := new(bytes.Buffer)
+			fmt.Fprintf(buf, "program:\n%s\n", req.Prog.Serialize())
+			buf.Write(rep.Output)
+			rep.Output = buf.Bytes()
+			mgr.crashes <- &Crash{Report: rep}
+		}
+
+		req.Done(res)
+	}
+}
+
+func (mgr *Manager) snapshotSetup(inst *vm.Instance, builder *flatbuffers.Builder, env flatrpc.ExecEnv) error { // one-time handshake + snapshot creation for a fresh VM
+	msg := flatrpc.SnapshotHandshakeT{
+		CoverEdges:       mgr.cfg.Experimental.CoverEdges,
+		Kernel64Bit:      mgr.cfg.SysTarget.PtrSize == 8,
+		Slowdown:         int32(mgr.cfg.Timeouts.Slowdown),
+		SyscallTimeoutMs: int32(mgr.cfg.Timeouts.Syscall / time.Millisecond),
+		ProgramTimeoutMs: int32(mgr.cfg.Timeouts.Program / time.Millisecond),
+		Features:         mgr.enabledFeatures,
+		EnvFlags:         env,
+		SandboxArg:       mgr.cfg.SandboxArg,
+	}
+	builder.Reset()
+	builder.Finish(msg.Pack(builder))
+	return inst.SetupSnapshot(builder.FinishedBytes()) // VM side: writes the message into shmem and takes the snapshot
+}
+
+func (mgr *Manager) snapshotRun(inst *vm.Instance, builder *flatbuffers.Builder, req *queue.Request) (
+	*queue.Result, []byte, error) { // executes one request in the snapshotted VM; returns result + console output
+	progData, err := req.Prog.SerializeForExec()
+	if err != nil {
+		queue.StatExecBufferTooSmall.Add(1)
+		return &queue.Result{Status: queue.ExecFailure}, nil, nil
+	}
+	msg := flatrpc.SnapshotRequestT{
+		ExecFlags: req.ExecOpts.ExecFlags,
+		NumCalls:  int32(len(req.Prog.Calls)),
+		ProgData:  progData,
+	}
+	for _, call := range req.ReturnAllSignal {
+		if call < 0 { // negative call index denotes "extra" (out-of-call) signal
+			msg.AllExtraSignal = true
+		} else {
+			msg.AllCallSignal |= 1 << call
+		}
+	}
+	builder.Reset()
+	builder.Finish(msg.Pack(builder))
+
+	start := time.Now()
+	res, output, err := inst.RunSnapshot(builder.FinishedBytes())
+	if err != nil {
+		return nil, nil, err
+	}
+	elapsed := time.Since(start)
+	queue.StatExecs.Add(1)
+
+	execError := ""
+	var info *flatrpc.ProgInfo
+	if len(res) > 4 { // reply is a 4-byte size prefix followed by a flatbuffers ExecutorMessage
+		res = res[4:]
+		// TODO: use more robust parsing from pkg/flatrpc/conn.go.
+		var raw flatrpc.ExecutorMessageRaw
+		raw.Init(res, flatbuffers.GetUOffsetT(res))
+		union := raw.UnPack()
+		if union.Msg != nil && union.Msg.Value != nil {
+			msg := union.Msg.Value.(*flatrpc.ExecResult)
+			if msg.Info != nil {
+				msg.Info.Elapsed = uint64(elapsed)
+				for len(msg.Info.Calls) < len(req.Prog.Calls) {
+					msg.Info.Calls = append(msg.Info.Calls, &flatrpc.CallInfo{
+						Error: 999, // synthetic errno for calls that were not executed
+					})
+				}
+				msg.Info.Calls = msg.Info.Calls[:len(req.Prog.Calls)]
+				if len(msg.Info.ExtraRaw) != 0 { // merge all extra signal/cover chunks into one
+					msg.Info.Extra = msg.Info.ExtraRaw[0]
+					for _, info := range msg.Info.ExtraRaw[1:] {
+						msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...)
+						msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...)
+					}
+					msg.Info.ExtraRaw = nil
+				}
+			}
+			info = msg.Info
+			execError = msg.Error
+		}
+	}
+	status := queue.Success
+	var resErr error
+	if execError != "" {
+		status = queue.ExecFailure
+		resErr = errors.New(execError)
+	}
+	return &queue.Result{
+		Status: status,
+		Info:   info,
+		Output: output,
+		Err:    resErr,
+	}, output, nil // return output to the caller as well: snapshotVM scans it for crash reports
+}
diff --git a/vm/qemu/qemu.go b/vm/qemu/qemu.go
index d8f0dd36c..858339c23 100644
--- a/vm/qemu/qemu.go
+++ b/vm/qemu/qemu.go
@@ -111,6 +111,7 @@ type instance struct {
qemu *exec.Cmd
merger *vmimpl.OutputMerger
files map[string]string
+ *snapshot
}
type archConfig struct {
@@ -371,6 +372,9 @@ func (pool *Pool) ctor(workdir, sshkey, sshuser string, index int) (*instance, e
sshkey: sshkey,
sshuser: sshuser,
}
+ if pool.env.Snapshot {
+ inst.snapshot = new(snapshot)
+ }
if st, err := os.Stat(inst.image); err == nil && st.Size() == 0 {
// Some kernels may not need an image, however caller may still
// want to pass us a fake empty image because the rest of syzkaller
@@ -415,6 +419,9 @@ func (inst *instance) Close() error {
if inst.mon != nil {
inst.mon.Close()
}
+ if inst.snapshot != nil {
+ inst.snapshotClose()
+ }
return nil
}
@@ -463,6 +470,12 @@ func (inst *instance) boot() error {
}
}()
+ if inst.snapshot != nil {
+ if err := inst.snapshotHandshake(); err != nil {
+ return err
+ }
+ }
+
if err := vmimpl.WaitForSSH(inst.debug, 10*time.Minute*inst.timeouts.Scale, "localhost",
inst.sshkey, inst.sshuser, inst.os, inst.port, inst.merger.Err, false); err != nil {
bootOutputStop <- true
@@ -555,6 +568,13 @@ func (inst *instance) buildQemuArgs() ([]string, error) {
"-device", "isa-applesmc,osk="+inst.cfg.AppleSmcOsk,
)
}
+ if inst.snapshot != nil {
+ snapshotArgs, err := inst.snapshotEnable()
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, snapshotArgs...)
+ }
return args, nil
}
diff --git a/vm/qemu/snapshot_linux.go b/vm/qemu/snapshot_linux.go
new file mode 100644
index 000000000..5a30fa382
--- /dev/null
+++ b/vm/qemu/snapshot_linux.go
@@ -0,0 +1,246 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package qemu
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "path/filepath"
+ "sync/atomic"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "golang.org/x/sys/unix"
+)
+
+type snapshot struct { // host-side state for snapshot-mode communication with the VM
+	ivsListener *net.UnixListener // accepts the single ivshmem-doorbell connection from qemu
+	ivsConn     *net.UnixConn    // established ivshmem-doorbell protocol connection
+	doorbellFD  int              // memfd backing the doorbell device region
+	eventFD     int              // eventfd signalled by the guest via the doorbell
+	shmemFD     int              // memfd backing the main shared memory region
+	shmem       []byte           // mapping of shmemFD: input area followed by the header
+	input       []byte           // first ConstMaxInputSize bytes of shmem, holds the next program
+	header      *flatrpc.SnapshotHeaderT // control header located right after the input area
+}
+
+func (inst *instance) snapshotClose() { // releases all snapshot resources; zero values mean "never created"
+	if inst.ivsListener != nil {
+		inst.ivsListener.Close()
+	}
+	if inst.ivsConn != nil {
+		inst.ivsConn.Close()
+	}
+	if inst.doorbellFD != 0 { // fds are assigned only after successful creation, so 0 == unset
+		syscall.Close(inst.doorbellFD)
+	}
+	if inst.eventFD != 0 {
+		syscall.Close(inst.eventFD)
+	}
+	if inst.shmemFD != 0 {
+		syscall.Close(inst.shmemFD)
+	}
+	if inst.shmem != nil {
+		syscall.Munmap(inst.shmem)
+	}
+}
+
+func (inst *instance) snapshotEnable() ([]string, error) { // creates host resources and returns extra qemu args for snapshot mode
+	// We use ivshmem device (Inter-VM Shared Memory) for communication with the VM,
+	// it allows to have a shared memory region directly accessible by both host and target:
+	// https://www.qemu.org/docs/master/system/devices/ivshmem.html
+	//
+	// The shared memory region is not restored as part of snapshot restore since we set:
+	// migrate_set_capability x-ignore-shared on
+	// This allows to write a new input into ivshmem before each restore.
+	//
+	// We also use doorbell (interrupt) capability of ivshmem to notify host about
+	// program execution completion. Doorbell also allows to send interrupts in the other direction
+	// (from host to target), but we don't need/use this since we arrange things such that
+	// snapshot restore serves as a signal to execute new input.
+	//
+	// Ideally we use a single ivshmem device for both purposes (shmem+doorbell).
+	// But unfortunately it seems that the doorbell device is always restored on snapshot restore
+	// (at least I did not find a way to make it not restored, maybe can be solved with qemu change).
+	// So we use 2 separate devices for these purposes.
+	shmemFD, err := unix.MemfdCreate("syz-qemu-shmem", 0)
+	if err != nil {
+		return nil, fmt.Errorf("qemu: memfd_create failed: %w", err)
+	}
+	inst.shmemFD = shmemFD
+	if err := syscall.Ftruncate(shmemFD, int64(flatrpc.ConstSnapshotShmemSize)); err != nil {
+		return nil, fmt.Errorf("qemu: ftruncate failed: %w", err)
+	}
+	shmem, err := syscall.Mmap(shmemFD, 0, int(flatrpc.ConstSnapshotShmemSize),
+		syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+	if err != nil {
+		return nil, fmt.Errorf("qemu: shmem mmap failed: %w", err)
+	}
+	inst.shmem = shmem
+	inst.input = shmem[:flatrpc.ConstMaxInputSize:flatrpc.ConstMaxInputSize]
+	inst.header = (*flatrpc.SnapshotHeaderT)(unsafe.Pointer(&shmem[flatrpc.ConstMaxInputSize]))
+	shmemFile := fmt.Sprintf("/proc/%v/fd/%v", syscall.Getpid(), shmemFD)
+
+	doorbellFD, err := unix.MemfdCreate("syz-qemu-doorbell", 0)
+	if err != nil {
+		return nil, fmt.Errorf("qemu: memfd_create failed: %w", err)
+	}
+	inst.doorbellFD = doorbellFD // store before ftruncate (as for shmemFD) so snapshotClose releases the fd on error
+	if err := syscall.Ftruncate(doorbellFD, int64(flatrpc.ConstSnapshotDoorbellSize)); err != nil {
+		return nil, fmt.Errorf("qemu: ftruncate failed: %w", err)
+	}
+
+	eventFD, err := unix.Eventfd(0, unix.EFD_SEMAPHORE)
+	if err != nil {
+		return nil, fmt.Errorf("qemu: eventfd failed: %w", err)
+	}
+	inst.eventFD = eventFD
+
+	sockPath := filepath.Join(inst.workdir, "ivs.sock")
+	ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: sockPath, Net: "unix"})
+	if err != nil {
+		return nil, fmt.Errorf("qemu: unix listen on %v failed: %w", sockPath, err)
+	}
+	inst.ivsListener = ln
+
+	return []string{
+		// migratable=on is required to take snapshots.
+		// tsc=off disables RDTSC timestamp counter, it's not virtualized/restored as part of snapshots,
+		// so the target kernel sees a large jump in time and always declares TSC as unstable after restore.
+		"-cpu", "host,migratable=on,tsc=off",
+		"-chardev", fmt.Sprintf("socket,path=%v,id=snapshot-doorbell", sockPath),
+		"-device", "ivshmem-doorbell,master=on,vectors=1,chardev=snapshot-doorbell",
+		"-device", "ivshmem-plain,master=on,memdev=snapshot-shmem",
+		"-object", fmt.Sprintf("memory-backend-file,size=%v,share=on,discard-data=on,id=snapshot-shmem,mem-path=%v",
+			uint64(flatrpc.ConstSnapshotShmemSize), shmemFile),
+	}, nil
+}
+
+func (inst *instance) snapshotHandshake() error { // performs the ivshmem-server side of the doorbell protocol with qemu
+	// ivshmem-doorbell expects an external server that communicates via a unix socket.
+	// The protocol is not documented, for details see:
+	// https://github.com/qemu/qemu/blob/master/hw/misc/ivshmem.c
+	// https://github.com/qemu/qemu/blob/master/contrib/ivshmem-server/ivshmem-server.c
+	conn, err := inst.ivsListener.AcceptUnix()
+	if err != nil {
+		return fmt.Errorf("qemu: unix accept failed: %w", err)
+	}
+	inst.ivsListener.Close() // only the single connection from our qemu is expected
+	inst.ivsListener = nil
+	inst.ivsConn = conn
+
+	msg := make([]byte, 8)
+	// Send protocol version 0.
+	binary.LittleEndian.PutUint64(msg, 0)
+	if _, err := conn.Write(msg); err != nil {
+		return fmt.Errorf("qemu: ivs conn write failed: %w", err)
+	}
+	// Send VM id 0.
+	binary.LittleEndian.PutUint64(msg, 0)
+	if _, err := conn.Write(msg); err != nil {
+		return fmt.Errorf("qemu: ivs conn write failed: %w", err)
+	}
+	// Send the doorbell region memory FD (id -1 denotes the shared memory message in the protocol).
+	binary.LittleEndian.PutUint64(msg, ^uint64(0))
+	rights := syscall.UnixRights(inst.doorbellFD)
+	if _, _, err := conn.WriteMsgUnix(msg, rights, nil); err != nil {
+		return fmt.Errorf("qemu: ivs conn sendmsg failed: %w", err)
+	}
+	// Send event FD for VM 1 interrupt vector 0.
+	binary.LittleEndian.PutUint64(msg, 1)
+	rights = syscall.UnixRights(inst.eventFD)
+	if _, _, err := conn.WriteMsgUnix(msg, rights, nil); err != nil {
+		return fmt.Errorf("qemu: ivs conn sendmsg failed: %w", err)
+	}
+	return nil
+}
+
+func (inst *instance) SetupSnapshot(input []byte) error { // writes the handshake input and takes the 'syz' VM snapshot
+	copy(inst.input, input)
+	// Tell executor that we are ready to snapshot and wait for an ack.
+	inst.header.UpdateState(flatrpc.SnapshotStateHandshake)
+	if !inst.waitSnapshotStateChange(flatrpc.SnapshotStateHandshake, 10*time.Minute) {
+		return fmt.Errorf("executor does not start snapshot handshake")
+	}
+	if _, err := inst.hmp("migrate_set_capability x-ignore-shared on", 0); err != nil { // keep shmem out of the snapshot so new inputs survive restore
+		return err
+	}
+	if _, err := inst.hmp("savevm syz", 0); err != nil {
+		return err
+	}
+	if inst.debug {
+		inst.hmp("info snapshots", 0) // this prints size of the snapshot
+	}
+	inst.header.UpdateState(flatrpc.SnapshotStateSnapshotted)
+	if !inst.waitSnapshotStateChange(flatrpc.SnapshotStateSnapshotted, time.Minute) {
+		return fmt.Errorf("executor has not confirmed snapshot handshake")
+	}
+	return nil
+}
+
+func (inst *instance) RunSnapshot(timeout time.Duration, input []byte) (result, output []byte, err error) { // executes input by restoring the snapshot
+	copy(inst.input, input)
+	inst.header.OutputOffset = 0
+	inst.header.OutputSize = 0
+	inst.header.UpdateState(flatrpc.SnapshotStateExecute)
+	if _, err := inst.hmp("loadvm syz", 0); err != nil { // the restore itself is the signal for the executor to run the input
+		return nil, nil, err
+	}
+	inst.waitSnapshotStateChange(flatrpc.SnapshotStateExecute, timeout) // timeout is not an error: result size stays 0 below
+	resStart := int(flatrpc.ConstMaxInputSize) + int(atomic.LoadUint32(&inst.header.OutputOffset))
+	resEnd := resStart + int(atomic.LoadUint32(&inst.header.OutputSize))
+	var res []byte
+	if resEnd <= len(inst.shmem) { // guard against a corrupted offset/size written by the guest
+		res = inst.shmem[resStart:resEnd:resEnd]
+	}
+	output = inst.readOutput()
+	return res, output, nil
+}
+
+func (inst *instance) waitSnapshotStateChange(state flatrpc.SnapshotState, timeout time.Duration) bool { // true if header state left 'state' before timeout
+	deadline := time.Now().Add(timeout)
+	timeoutMs := int(timeout / time.Millisecond)
+	fds := []unix.PollFd{{
+		Fd:     int32(inst.eventFD),
+		Events: unix.POLLIN,
+	}}
+	for {
+		if n, _ := unix.Poll(fds, timeoutMs); n == 1 {
+			var buf [8]byte
+			syscall.Read(inst.eventFD, buf[:]) // drain the semaphore eventfd
+		}
+		if inst.header.LoadState() != state { // re-check state regardless of poll outcome to avoid missed wakeups
+			return true
+		}
+		remain := time.Until(deadline)
+		if remain < time.Millisecond {
+			return false
+		}
+		timeoutMs = int(remain / time.Millisecond)
+	}
+}
+
+func (inst *instance) readOutput() []byte { // drains console output accumulated in the merger channel
+	var output []byte
+	// If output channel has overflown, then wait for more output from the merger goroutine.
+	wait := cap(inst.merger.Output)
+	for {
+		select {
+		case out := <-inst.merger.Output:
+			output = append(output, out...)
+			wait--
+		default:
+			if wait > 0 { // fewer than cap messages read, so the channel never filled up - done
+				return output
+			}
+			// After the first overflow we wait after every read because the goroutine
+			// may be running and sending more output to the channel concurrently.
+			wait = 1
+			time.Sleep(10 * time.Millisecond)
+		}
+	}
+}
diff --git a/vm/qemu/snapshot_unimpl.go b/vm/qemu/snapshot_unimpl.go
new file mode 100644
index 000000000..ab9438a47
--- /dev/null
+++ b/vm/qemu/snapshot_unimpl.go
@@ -0,0 +1,37 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+//go:build !linux
+
+package qemu
+
+import (
+	"fmt"
+	"time"
+)
+
+type snapshot struct{}
+
+var errNotImplemented = fmt.Errorf("snapshots are not implemented")
+
+// Stubs for non-linux hosts: snapshot mode relies on linux-only primitives
+// (memfd, eventfd, the ivshmem socket handshake), so every operation fails.
+func (inst *instance) snapshotClose() {
+}
+
+func (inst *instance) snapshotEnable() ([]string, error) {
+	return nil, errNotImplemented
+}
+
+func (inst *instance) snapshotHandshake() error {
+	return errNotImplemented
+}
+
+func (inst *instance) SetupSnapshot(input []byte) error {
+	return errNotImplemented
+}
+
+// Signature must match the linux implementation so callers compile on all platforms.
+func (inst *instance) RunSnapshot(timeout time.Duration, input []byte) (result, output []byte, err error) {
+	return nil, nil, errNotImplemented
+}