aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-06-04 12:55:41 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-06-24 09:57:34 +0000
commite16e2c9a4cb6937323e861b646792a6c4c978a3c (patch)
tree6c513e98e5f465b44a98546d8984485d2c128582
parent90d67044dab68568e8f35bc14b68055dbd166eff (diff)
executor: add runner mode
Move all syz-fuzzer logic into syz-executor and remove syz-fuzzer. Also restore syz-runtest functionality in the manager. Update #4917 (sets most signal handlers to SIG_IGN)
-rw-r--r--.clang-format1
-rw-r--r--Makefile20
-rw-r--r--docs/internals.md9
-rw-r--r--docs/setup_syzbot.md6
-rw-r--r--docs/syz_verifier.md7
-rw-r--r--docs/troubleshooting.md6
-rw-r--r--executor/common.h19
-rw-r--r--executor/conn.h192
-rw-r--r--executor/cover_filter.h8
-rw-r--r--executor/executor.cc735
-rw-r--r--executor/executor_linux.h17
-rw-r--r--executor/executor_runner.h801
-rw-r--r--executor/files.h85
-rw-r--r--executor/shmem.h19
-rw-r--r--executor/style_test.go5
-rw-r--r--executor/subprocess.h129
-rw-r--r--executor/test.h2
-rw-r--r--pkg/csource/options.go58
-rw-r--r--pkg/flatrpc/conn.go60
-rw-r--r--pkg/flatrpc/conn_test.go67
-rw-r--r--pkg/flatrpc/flatrpc.fbs25
-rw-r--r--pkg/flatrpc/flatrpc.go537
-rw-r--r--pkg/flatrpc/flatrpc.h523
-rw-r--r--pkg/flatrpc/helpers.go34
-rw-r--r--pkg/fuzzer/fuzzer.go2
-rw-r--r--pkg/fuzzer/fuzzer_test.go176
-rw-r--r--pkg/fuzzer/job.go23
-rw-r--r--pkg/fuzzer/queue/queue.go27
-rw-r--r--pkg/host/features.go80
-rw-r--r--pkg/host/machine_info.go49
-rw-r--r--pkg/instance/instance.go49
-rw-r--r--pkg/ipc/gate.go76
-rw-r--r--pkg/ipc/ipc.go838
-rw-r--r--pkg/ipc/ipc_priv_test.go32
-rw-r--r--pkg/ipc/ipc_test.go262
-rw-r--r--pkg/ipc/ipcconfig/ipcconfig.go56
-rw-r--r--pkg/mgrconfig/load.go7
-rw-r--r--pkg/report/fuchsia.go2
-rw-r--r--pkg/report/linux.go6
-rw-r--r--pkg/report/testdata/fuchsia/report/630
-rw-r--r--pkg/rpcserver/last_executing.go (renamed from syz-manager/last_executing.go)6
-rw-r--r--pkg/rpcserver/last_executing_test.go56
-rw-r--r--pkg/rpcserver/local.go138
-rw-r--r--pkg/rpcserver/rpcserver.go (renamed from syz-manager/rpc.go)463
-rw-r--r--pkg/runtest/executor_test.go131
-rw-r--r--pkg/runtest/run.go253
-rw-r--r--pkg/runtest/run_test.go175
-rw-r--r--pkg/vminfo/features.go20
-rw-r--r--pkg/vminfo/syscalls.go32
-rw-r--r--pkg/vminfo/vminfo.go17
-rw-r--r--pkg/vminfo/vminfo_test.go66
-rw-r--r--sys/linux/init.go21
-rw-r--r--sys/test/exec.txt2
-rw-r--r--syz-ci/updater.go1
-rw-r--r--syz-fuzzer/fuzzer.go304
-rw-r--r--syz-fuzzer/fuzzer_test.go56
-rw-r--r--syz-fuzzer/proc.go166
-rw-r--r--syz-manager/covfilter.go10
-rw-r--r--syz-manager/http.go107
-rw-r--r--syz-manager/hub.go26
-rw-r--r--syz-manager/last_executing_test.go56
-rw-r--r--syz-manager/manager.go400
-rw-r--r--syz-manager/stats.go5
-rw-r--r--syz-runner/runner.go144
-rw-r--r--tools/syz-execprog/execprog.go427
-rw-r--r--tools/syz-runtest/runtest.go321
-rw-r--r--vm/adb/adb.go2
-rw-r--r--vm/gvisor/gvisor.go8
-rw-r--r--vm/qemu/qemu.go11
-rw-r--r--vm/vm.go30
-rw-r--r--vm/vm_test.go17
-rw-r--r--vm/vmimpl/vmimpl.go8
72 files changed, 4159 insertions, 4400 deletions
diff --git a/.clang-format b/.clang-format
index da47af9bf..ecf38febc 100644
--- a/.clang-format
+++ b/.clang-format
@@ -12,4 +12,5 @@ AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
ColumnLimit: 0
AccessModifierOffset: -8
+PackConstructorInitializers: Never
diff --git a/Makefile b/Makefile
index a778d9f65..d3ec3ddcb 100644
--- a/Makefile
+++ b/Makefile
@@ -98,8 +98,7 @@ ifeq ("$(TARGETOS)", "trusty")
endif
.PHONY: all clean host target \
- manager runtest fuzzer executor \
- ci hub \
+ manager executor ci hub \
execprog mutate prog2c trace2syz repro upgrade db \
usbgen symbolize cover kconf syz-build crush \
bin/syz-extract bin/syz-fmt \
@@ -112,8 +111,8 @@ endif
presubmit_arch_executor presubmit_dashboard presubmit_race presubmit_race_dashboard presubmit_old
all: host target
-host: manager runtest repro mutate prog2c db upgrade
-target: fuzzer execprog executor
+host: manager repro mutate prog2c db upgrade
+target: execprog executor
executor: descriptions
ifeq ($(TARGETOS),fuchsia)
@@ -156,13 +155,6 @@ descriptions:
manager: descriptions
GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-manager github.com/google/syzkaller/syz-manager
-runtest: descriptions
- # TODO: fold syz-runtest into syz-manager.
- # GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-runtest github.com/google/syzkaller/tools/syz-runtest
-
-fuzzer: descriptions
- GOOS=$(TARGETGOOS) GOARCH=$(TARGETGOARCH) $(GO) build $(GOTARGETFLAGS) -o ./bin/$(TARGETOS)_$(TARGETVMARCH)/syz-fuzzer$(EXE) github.com/google/syzkaller/syz-fuzzer
-
execprog: descriptions
GOOS=$(TARGETGOOS) GOARCH=$(TARGETGOARCH) $(GO) build $(GOTARGETFLAGS) -o ./bin/$(TARGETOS)_$(TARGETVMARCH)/syz-execprog$(EXE) github.com/google/syzkaller/tools/syz-execprog
@@ -215,13 +207,9 @@ bisect: descriptions
GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-bisect github.com/google/syzkaller/tools/syz-bisect
verifier: descriptions
- # TODO: switch syz-verifier to use syz-fuzzer.
+ # TODO: switch syz-verifier to use syz-executor.
# GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-verifier github.com/google/syzkaller/syz-verifier
-runner: descriptions
- # TODO: switch syz-verifier to use syz-fuzzer.
- # GOOS=$(TARGETGOOS) GOARCH=$(TARGETGOARCH) $(GO) build $(GOTARGETFLAGS) -o ./bin/$(TARGETOS)_$(TARGETVMARCH)/syz-runner$(EXE) github.com/google/syzkaller/syz-runner
-
# `extract` extracts const files from various kernel sources, and may only
# re-generate parts of files.
extract: bin/syz-extract
diff --git a/docs/internals.md b/docs/internals.md
index 7d192b738..b4d893791 100644
--- a/docs/internals.md
+++ b/docs/internals.md
@@ -18,14 +18,13 @@ red labels indicate corresponding configuration options.
It runs on a host with a stable kernel which does not experience white-noise fuzzer load.
-`syz-manager` starts `syz-fuzzer` processes (one inside each VM).
-`syz-fuzzer`s comminucate with `syz-manager` over RPC to receive the programs
+`syz-manager` starts `syz-executor` processes (one inside each VM).
+`syz-executor`s communicate with `syz-manager` over RPC to receive the programs
that must be executed and to report back the results (error statuses, collected coverage, etc.).
-To execute programs, `syz-fuzzer` starts transient `syz-executor` processes.
+To execute programs, `syz-executor` starts transient subprocesses.
-Each `syz-executor` process executes a single input (a sequence of syscalls).
-It accepts the program to execute from the `syz-fuzzer` process and sends results back.
+Each transient subprocess executes a single input (a sequence of syscalls).
It is designed to be as simple as possible (to not interfere with fuzzing process),
written in C++, compiled as static binary and uses shared memory for communication.
diff --git a/docs/setup_syzbot.md b/docs/setup_syzbot.md
index 060efcc6d..b6c05b00b 100644
--- a/docs/setup_syzbot.md
+++ b/docs/setup_syzbot.md
@@ -4,7 +4,9 @@ This doc will be useful to you:
- should you wish to hack on user interface bits like the dashboard / mailing list integration or
- should you wish to continuously run a separate syzbot dashboard for your own kernels
-Note: For most development purposes you don't need a full syzbot setup. The meat of syzkaller is really located in syz-manager, syz-fuzzer and syz-executor. You can run syz-manager directly which is usually what you will want to do during fuzzer development. [See this documentation for syz-manager setup instructions](setup.md).
+Note: For most development purposes you don't need a full syzbot setup. The meat of syzkaller is really located
+in syz-manager and syz-executor. You can run syz-manager directly which is usually what you will want to do during
+fuzzer development. [See this documentation for syz-manager setup instructions](setup.md).
This doc assumes that you:
- have a GCP account and billing setup
@@ -366,4 +368,4 @@ sudo journalctl -fu syz-ci
```
gcloud app browse --project=$PROJECT
```
-Once syzkaller finds the first crashes they should show up here. This might take a while. \ No newline at end of file
+Once syzkaller finds the first crashes they should show up here. This might take a while.
diff --git a/docs/syz_verifier.md b/docs/syz_verifier.md
index f2a1437a9..798965cef 100644
--- a/docs/syz_verifier.md
+++ b/docs/syz_verifier.md
@@ -107,10 +107,3 @@ ERRNO mismatches found for program:
The order of the results is given by the order in which configuration files
were passed so `Pool: 0 ` reports results for the kernel created using
`kernel0.cfg` and so on.
-
-The [Flags](/pkg/ipc/ipc.go#L82) can be used to determine the state reached by
-the system call:
-* `0` = syscall not even started
-* `1` = syscall started
-* `3` = syscall finished executing
-* `7` = syscall blocked
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 7b1d77b51..930adbc04 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -3,11 +3,11 @@
Here are some things to check if there are problems running syzkaller.
- Use the `-debug` command line option to make syzkaller print all possible debug output,
- from both the `syz-manager` top-level program and the `syz-fuzzer` instances. With this option
- syzkaller will only run one VM instance.
+ from both the `syz-manager` top-level program and the `syz-executor` instances.
+ With this option syzkaller will only run one VM instance.
- Use the `-vv N` command line option to increase the amount of logging output, from both
- the `syz-manager` top-level program and the `syz-fuzzer` instances (which go to the
+ the `syz-manager` top-level program and the `syz-executor` instances (which go to the
output files in the `crashes` subdirectory of the working directory). Higher values of
N give more output.
diff --git a/executor/common.h b/executor/common.h
index 3a735a086..243d388c2 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -226,7 +226,7 @@ static void use_temporary_dir(void)
#endif
#if GOOS_netbsd || GOOS_freebsd || GOOS_darwin || GOOS_openbsd || GOOS_test
-#if (SYZ_EXECUTOR || SYZ_REPEAT) && SYZ_EXECUTOR_USES_FORK_SERVER && (SYZ_EXECUTOR || SYZ_USE_TMP_DIR)
+#if SYZ_EXECUTOR || SYZ_REPEAT && SYZ_USE_TMP_DIR && SYZ_EXECUTOR_USES_FORK_SERVER
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
@@ -594,10 +594,6 @@ static void loop(void)
#if SYZ_EXECUTOR || SYZ_REPEAT
static void execute_one(void);
-#if SYZ_EXECUTOR_USES_FORK_SERVER
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/wait.h>
#if GOOS_linux
#define WAIT_FLAGS __WALL
@@ -605,9 +601,10 @@ static void execute_one(void);
#define WAIT_FLAGS 0
#endif
-#if SYZ_EXECUTOR
-static void reply_handshake();
-#endif
+#if SYZ_EXECUTOR_USES_FORK_SERVER
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
static void loop(void)
{
@@ -616,7 +613,7 @@ static void loop(void)
#endif
#if SYZ_EXECUTOR
// Tell parent that we are ready to serve.
- reply_handshake();
+ reply_execute(0);
#endif
int iter = 0;
#if SYZ_REPEAT_TIMES
@@ -675,7 +672,7 @@ static void loop(void)
uint64 start = current_time_ms();
#if SYZ_EXECUTOR
uint64 last_executed = start;
- uint32 executed_calls = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint32 executed_calls = output_data->completed.load(std::memory_order_relaxed);
#endif
for (;;) {
sleep_ms(10);
@@ -695,7 +692,7 @@ static void loop(void)
uint64 min_timeout_ms = program_timeout_ms * 3 / 5;
uint64 inactive_timeout_ms = syscall_timeout_ms * 20;
uint64 now = current_time_ms();
- uint32 now_executed = __atomic_load_n(output_data, __ATOMIC_RELAXED);
+ uint32 now_executed = output_data->completed.load(std::memory_order_relaxed);
if (executed_calls != now_executed) {
executed_calls = now_executed;
last_executed = now;
diff --git a/executor/conn.h b/executor/conn.h
new file mode 100644
index 000000000..19026748d
--- /dev/null
+++ b/executor/conn.h
@@ -0,0 +1,192 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#include <arpa/inet.h>
+#include <fcntl.h>
+#include <netdb.h>
+#include <string.h>
+#include <sys/select.h>
+#include <sys/socket.h>
+
+#include <vector>
+
+// Connection represents a client TCP connection.
+// It connects to the given addr:port and allows to send/receive
+// flatbuffers-encoded messages.
+class Connection
+{
+public:
+ Connection(const char* addr, const char* port)
+ : fd_(Connect(addr, port))
+ {
+ }
+
+ int FD() const
+ {
+ return fd_;
+ }
+
+ template <typename Msg>
+ void Send(const Msg& msg)
+ {
+ typedef typename Msg::TableType Raw;
+ auto off = Raw::Pack(fbb_, &msg);
+ fbb_.FinishSizePrefixed(off);
+ auto data = fbb_.GetBufferSpan();
+ Send(data.data(), data.size());
+ fbb_.Reset();
+ }
+
+ template <typename Msg>
+ void Recv(Msg& msg)
+ {
+ typedef typename Msg::TableType Raw;
+ flatbuffers::uoffset_t size;
+ Recv(&size, sizeof(size));
+ recv_buf_.resize(size);
+ Recv(recv_buf_.data(), size);
+ auto raw = flatbuffers::GetRoot<Raw>(recv_buf_.data());
+ raw->UnPackTo(&msg);
+ }
+
+ void Send(const void* data, size_t size)
+ {
+ for (size_t sent = 0; sent < size;) {
+ ssize_t n = write(fd_, static_cast<const char*>(data) + sent, size - sent);
+ if (n > 0) {
+ sent += n;
+ continue;
+ }
+ if (errno == EINTR)
+ continue;
+ if (errno == EAGAIN) {
+ sleep_ms(1);
+ continue;
+ }
+ failmsg("failed to send rpc", "fd=%d want=%zu sent=%zu n=%zd", fd_, size, sent, n);
+ }
+ }
+
+private:
+ const int fd_;
+ std::vector<char> recv_buf_;
+ flatbuffers::FlatBufferBuilder fbb_;
+
+ void Recv(void* data, size_t size)
+ {
+ for (size_t recv = 0; recv < size;) {
+ ssize_t n = read(fd_, static_cast<char*>(data) + recv, size - recv);
+ if (n > 0) {
+ recv += n;
+ continue;
+ }
+ if (errno == EINTR)
+ continue;
+ if (errno == EAGAIN) {
+ sleep_ms(1);
+ continue;
+ }
+ failmsg("failed to recv rpc", "fd=%d want=%zu sent=%zu n=%zd", fd_, size, recv, n);
+ }
+ }
+
+ static int Connect(const char* addr, const char* ports)
+ {
+ int port = atoi(ports);
+ if (port == 0)
+ failmsg("failed to parse manager port", "port=%s", ports);
+ if (!strcmp(addr, "stdin"))
+ return STDIN_FILENO;
+ sockaddr_in saddr4 = {};
+ saddr4.sin_family = AF_INET;
+ saddr4.sin_port = htons(port);
+ if (inet_pton(AF_INET, addr, &saddr4.sin_addr))
+ return Connect(&saddr4, &saddr4.sin_addr, port);
+ sockaddr_in6 saddr6 = {};
+ saddr6.sin6_family = AF_INET6;
+ saddr6.sin6_port = htons(port);
+ if (inet_pton(AF_INET6, addr, &saddr6.sin6_addr))
+ return Connect(&saddr6, &saddr6.sin6_addr, port);
+ auto* hostent = gethostbyname(addr);
+ if (!hostent)
+ failmsg("failed to resolve manager addr", "addr=%s h_errno=%d", addr, h_errno);
+ for (char** addr = hostent->h_addr_list; *addr; addr++) {
+ int fd;
+ if (hostent->h_addrtype == AF_INET) {
+ memcpy(&saddr4.sin_addr, *addr, std::min<size_t>(hostent->h_length, sizeof(saddr4.sin_addr)));
+ fd = Connect(&saddr4, &saddr4.sin_addr, port);
+ } else if (hostent->h_addrtype == AF_INET6) {
+ memcpy(&saddr6.sin6_addr, *addr, std::min<size_t>(hostent->h_length, sizeof(saddr6.sin6_addr)));
+ fd = Connect(&saddr6, &saddr6.sin6_addr, port);
+ } else {
+ failmsg("unknown socket family", "family=%d", hostent->h_addrtype);
+ }
+ if (fd != -1)
+ return fd;
+ }
+ failmsg("can't connect to manager", "addr=%s:%s", addr, ports);
+ }
+
+ template <typename addr_t>
+ static int Connect(addr_t* addr, void* ip, int port)
+ {
+ auto* saddr = reinterpret_cast<sockaddr*>(addr);
+ int fd = socket(saddr->sa_family, SOCK_STREAM, IPPROTO_TCP);
+ if (fd == -1)
+ fail("failed to create socket");
+ char str[128] = {};
+ inet_ntop(saddr->sa_family, ip, str, sizeof(str));
+ if (connect(fd, saddr, sizeof(*addr))) {
+ printf("failed to connect to manager at %s:%d: %s\n", str, port, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ return fd;
+ }
+
+ Connection(const Connection&) = delete;
+ Connection& operator=(const Connection&) = delete;
+};
+
+// Select is a wrapper around select system call.
+class Select
+{
+public:
+ Select()
+ {
+ FD_ZERO(&rdset_);
+ }
+
+ void Arm(int fd)
+ {
+ FD_SET(fd, &rdset_);
+ max_fd_ = std::max(max_fd_, fd);
+ }
+
+ bool Ready(int fd) const
+ {
+ return FD_ISSET(fd, &rdset_);
+ }
+
+ void Wait(int ms)
+ {
+ timespec timeout = {.tv_sec = ms / 1000, .tv_nsec = (ms % 1000) * 1000 * 1000};
+ if (pselect(max_fd_ + 1, &rdset_, nullptr, nullptr, &timeout, nullptr) < 0) {
+ if (errno != EINTR && errno != EAGAIN)
+ fail("pselect failed");
+ }
+ }
+
+ static void Prepare(int fd)
+ {
+ if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK))
+ fail("fcntl(O_NONBLOCK) failed");
+ }
+
+private:
+ fd_set rdset_;
+ int max_fd_ = -1;
+
+ Select(const Select&) = delete;
+ Select& operator=(const Select&) = delete;
+};
diff --git a/executor/cover_filter.h b/executor/cover_filter.h
index 672e9fbec..c303d8b23 100644
--- a/executor/cover_filter.h
+++ b/executor/cover_filter.h
@@ -26,13 +26,15 @@
class CoverFilter
{
public:
- CoverFilter(const char* file, void* preferred = nullptr)
- : shmem_(file, preferred, kMemSize), tab_(static_cast<Table*>(shmem_.Mem()))
+ CoverFilter()
+ : shmem_(kMemSize),
+ tab_(static_cast<Table*>(shmem_.Mem()))
{
}
CoverFilter(int fd, void* preferred = nullptr)
- : shmem_(fd, preferred, kMemSize, false), tab_(static_cast<Table*>(shmem_.Mem()))
+ : shmem_(fd, preferred, kMemSize, false),
+ tab_(static_cast<Table*>(shmem_.Mem()))
{
}
diff --git a/executor/executor.cc b/executor/executor.cc
index ca728a6aa..a4ea17f47 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -3,9 +3,6 @@
// +build
-// Currently this is unused (included only to test building).
-#include "pkg/flatrpc/flatrpc.h"
-
#include <algorithm>
#include <errno.h>
#include <limits.h>
@@ -18,12 +15,17 @@
#include <string.h>
#include <time.h>
+#include <atomic>
+#include <optional>
+
#if !GOOS_windows
#include <unistd.h>
#endif
#include "defs.h"
+#include "pkg/flatrpc/flatrpc.h"
+
#if defined(__GNUC__)
#define SYSCALLAPI
#define NORETURN __attribute__((noreturn))
@@ -75,6 +77,7 @@ typedef unsigned char uint8;
// Note: zircon max fd is 256.
// Some common_OS.h files know about this constant for RLIMIT_NOFILE.
const int kMaxFd = 250;
+const int kFdLimit = 256;
const int kMaxThreads = 32;
const int kInPipeFd = kMaxFd - 1; // remapped from stdin
const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
@@ -90,8 +93,8 @@ const int kCoverOptimizedPreMmap = 3; // this many will be mmapped inside main()
const int kCoverDefaultCount = 6; // otherwise we only init kcov instances inside main()
// Logical error (e.g. invalid input program), use as an assert() alternative.
-// If such error happens 10+ times in a row, it will be detected as a bug by syz-fuzzer.
-// syz-fuzzer will fail and syz-manager will create a bug for this.
+// If such error happens 10+ times in a row, it will be detected as a bug by the runner process.
+// The runner will fail and syz-manager will create a bug for this.
// Note: err is used for bug deduplication, thus distinction between err (constant message)
// and msg (varying part).
static NORETURN void fail(const char* err);
@@ -118,12 +121,8 @@ void debug_dump_data(const char* data, int length);
#endif
static void receive_execute();
-static void reply_execute(int status);
-
-#if SYZ_EXECUTOR_USES_FORK_SERVER
+static void reply_execute(uint32 status);
static void receive_handshake();
-static void reply_handshake();
-#endif
#if SYZ_EXECUTOR_USES_FORK_SERVER
// Allocating (and forking) virtual memory for each executed process is expensive, so we only mmap
@@ -133,28 +132,133 @@ const int kMaxOutputCoverage = 6 << 20; // coverage is needed in ~ up to 1/3 of
const int kMaxOutputSignal = 4 << 20;
const int kMinOutput = 256 << 10; // if we don't need to send signal, the output is rather short.
const int kInitialOutput = kMinOutput; // the minimal size to be allocated in the parent process
+const int kMaxOutput = kMaxOutputComparisons;
#else
// We don't fork and allocate the memory only once, so prepare for the worst case.
const int kInitialOutput = 14 << 20;
+const int kMaxOutput = kInitialOutput;
#endif
+// For use with flatrpc bit flags.
+template <typename T>
+bool IsSet(T flags, T f)
+{
+ return (flags & f) != T::NONE;
+}
+
// TODO: allocate a smaller amount of memory in the parent once we merge the patches that enable
// prog execution with neither signal nor coverage. Likely 64kb will be enough in that case.
+const uint32 kMaxCalls = 64;
+
+struct alignas(8) OutputData {
+ std::atomic<uint32> size;
+ std::atomic<uint32> consumed;
+ std::atomic<uint32> completed;
+ struct {
+ // Call index in the test program (they may be out-of-order if some syscalls block).
+ int index;
+ // Offset of the CallInfo object in the output region.
+ flatbuffers::Offset<rpc::CallInfoRaw> offset;
+ } calls[kMaxCalls];
+
+ void Reset()
+ {
+ size.store(0, std::memory_order_relaxed);
+ consumed.store(0, std::memory_order_relaxed);
+ completed.store(0, std::memory_order_relaxed);
+ }
+};
+
+// ShmemAllocator/ShmemBuilder help to construct flatbuffers ExecResult reply message in shared memory.
+//
+// To avoid copying the reply (in particular coverage/signal/comparisons which may be large), the child
+// process starts forming CallInfo objects as it handles completion of syscalls, then the top-most runner
+// process uses these CallInfo to form an array of them, and adds ProgInfo object with a reference to the array.
+// In order to make this possible, OutputData object is placed at the beginning of the shared memory region,
+// and it records metadata required to start serialization in one process and continue later in another process.
+//
+// OutputData::size is the size of the whole shmem region that the child uses (it has a different size when coverage/
+// comparisons are requested). Note that flatbuffers serialization happens from the end of the buffer backwards.
+// OutputData::consumed records the currently consumed amount of memory in the shmem region so that the parent process
+// can continue from that point.
+// OutputData::completed records number of completed calls (entries in OutputData::calls arrays).
+// Flatbuffers identifies everything using offsets in the buffer, OutputData::calls::offset records this offset
+// for the call object so that we can use it in the parent process to construct the array of calls.
+//
+// FlatBufferBuilder generally grows the underlying buffer incrementally as necessary and copying data
+// (std::vector style). We cannot do this in the shared memory since we have only a single region.
+// To allow serialization into the shared memory region, ShmemBuilder passes initial buffer size which is equal
+// to the overall shmem region size (minus OutputData header size) to FlatBufferBuilder, and the custom
+// ShmemAllocator allocator. As a result, FlatBufferBuilder does exactly one allocation request
+// to ShmemAllocator and never reallocates (if we overflow the buffer and FlatBufferBuilder does another request,
+// ShmemAllocator will fail).
+class ShmemAllocator : public flatbuffers::Allocator
+{
+public:
+ ShmemAllocator(void* buf, size_t size)
+ : buf_(buf),
+ size_(size)
+ {
+ }
+
+private:
+ void* buf_;
+ size_t size_;
+ bool allocated_ = false;
+
+ uint8_t* allocate(size_t size) override
+ {
+ if (allocated_ || size != size_)
+ failmsg("bad allocate request", "allocated=%d size=%zu/%zu", allocated_, size_, size);
+ allocated_ = true;
+ return static_cast<uint8_t*>(buf_);
+ }
+
+ void deallocate(uint8_t* p, size_t size) override
+ {
+ if (!allocated_ || buf_ != p || size_ != size)
+ failmsg("bad deallocate request", "allocated=%d buf=%p/%p size=%zu/%zu",
+ allocated_, buf_, p, size_, size);
+ allocated_ = false;
+ }
+
+ uint8_t* reallocate_downward(uint8_t* old_p, size_t old_size,
+ size_t new_size, size_t in_use_back,
+ size_t in_use_front) override
+ {
+ fail("can't reallocate");
+ }
+};
+
+class ShmemBuilder : ShmemAllocator, public flatbuffers::FlatBufferBuilder
+{
+public:
+ ShmemBuilder(OutputData* data, size_t size)
+ : ShmemAllocator(data + 1, size - sizeof(*data)),
+ FlatBufferBuilder(size - sizeof(*data), this)
+ {
+ data->size.store(size, std::memory_order_relaxed);
+ size_t consumed = data->consumed.load(std::memory_order_relaxed);
+ if (consumed >= size - sizeof(*data))
+ failmsg("ShmemBuilder: too large output offset", "size=%zd consumed=%zd", size, consumed);
+ if (consumed)
+ FlatBufferBuilder::buf_.make_space(consumed);
+ }
+};
+
const int kInFd = 3;
const int kOutFd = 4;
-static uint32* output_data;
-static uint32* output_pos;
-static int output_size;
-static void mmap_output(int size);
-static uint32* write_output(uint32 v);
-static uint32* write_output_64(uint64 v);
-static void write_completed(uint32 completed);
+const int kMaxSignalFd = 5;
+const int kCoverFilterFd = 6;
+static OutputData* output_data;
+static std::optional<ShmemBuilder> output_builder;
+static uint32 output_size;
+static void mmap_output(uint32 size);
static uint32 hash(uint32 a);
static bool dedup(uint32 sig);
-uint64 start_time_ms = 0;
-
+static uint64 start_time_ms = 0;
static bool flag_debug;
static bool flag_coverage;
static bool flag_sandbox_none;
@@ -181,6 +285,10 @@ static bool flag_threaded;
// If true, then executor should write the comparisons data to fuzzer.
static bool flag_comparisons;
+static uint64 request_id;
+static uint64 all_call_signal;
+static bool all_extra_signal;
+
// Tunable timeouts, received with execute_req.
static uint64 syscall_timeout_ms;
static uint64 program_timeout_ms;
@@ -193,8 +301,8 @@ static bool in_execute_one = false;
#define SYZ_EXECUTOR 1
#include "common.h"
-const int kMaxInput = 4 << 20; // keep in sync with prog.ExecBufferSize
-const int kMaxCommands = 1000; // prog package knows about this constant (prog.execMaxCommands)
+const size_t kMaxInput = 4 << 20; // keep in sync with prog.ExecBufferSize
+const size_t kMaxCommands = 1000; // prog package knows about this constant (prog.execMaxCommands)
const uint64 instr_eof = -1;
const uint64 instr_copyin = -2;
@@ -294,29 +402,25 @@ struct res_t {
static res_t results[kMaxCommands];
const uint64 kInMagic = 0xbadc0ffeebadface;
-const uint32 kOutMagic = 0xbadf00d;
struct handshake_req {
uint64 magic;
- uint64 flags; // env flags
+ rpc::ExecEnv flags;
uint64 pid;
uint64 sandbox_arg;
- uint64 cover_filter_size;
- // Followed by uint64[cover_filter_size] filter.
-};
-
-struct handshake_reply {
- uint32 magic;
};
struct execute_req {
uint64 magic;
- uint64 env_flags;
+ uint64 id;
+ rpc::ExecEnv env_flags;
uint64 exec_flags;
uint64 pid;
uint64 syscall_timeout_ms;
uint64 program_timeout_ms;
uint64 slowdown_scale;
+ uint64 all_call_signal;
+ bool all_extra_signal;
};
struct execute_reply {
@@ -325,25 +429,6 @@ struct execute_reply {
uint32 status;
};
-// call_reply.flags
-const uint32 call_flag_executed = 1 << 0;
-const uint32 call_flag_finished = 1 << 1;
-const uint32 call_flag_blocked = 1 << 2;
-const uint32 call_flag_fault_injected = 1 << 3;
-
-struct call_reply {
- execute_reply header;
- uint32 magic;
- uint32 call_index;
- uint32 call_num;
- uint32 reserrno;
- uint32 flags;
- uint32 signal_size;
- uint32 cover_size;
- uint32 comps_size;
- // signal/cover/comps follow
-};
-
enum {
KCOV_CMP_CONST = 1,
KCOV_CMP_SIZE1 = 0,
@@ -359,11 +444,6 @@ struct kcov_comparison_t {
uint64 arg1;
uint64 arg2;
uint64 pc;
-
- bool ignore() const;
- void write();
- bool operator==(const struct kcov_comparison_t& other) const;
- bool operator<(const struct kcov_comparison_t& other) const;
};
typedef char kcov_comparison_size[sizeof(kcov_comparison_t) == 4 * sizeof(uint64) ? 1 : -1];
@@ -390,8 +470,8 @@ static uint64 swap(uint64 v, uint64 size, uint64 bf);
static void copyin(char* addr, uint64 val, uint64 size, uint64 bf, uint64 bf_off, uint64 bf_len);
static bool copyout(char* addr, uint64 size, uint64* res);
static void setup_control_pipes();
-static void setup_features(char** enable, int n);
static bool coverage_filter(uint64 pc);
+static std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp);
#include "syscalls.h"
@@ -417,10 +497,16 @@ static feature_t features[] = {};
#include "shmem.h"
+#include "conn.h"
#include "cover_filter.h"
+#include "files.h"
+#include "subprocess.h"
+
+#include "executor_runner.h"
#include "test.h"
+static std::optional<CoverFilter> max_signal;
static std::optional<CoverFilter> cover_filter;
#if SYZ_HAVE_SANDBOX_ANDROID
@@ -429,13 +515,9 @@ static uint64 sandbox_arg = 0;
int main(int argc, char** argv)
{
- if (argc == 2 && strcmp(argv[1], "version") == 0) {
- puts(GOOS " " GOARCH " " SYZ_REVISION " " GIT_REVISION);
- return 0;
- }
- if (argc >= 2 && strcmp(argv[1], "setup") == 0) {
- setup_features(argv + 2, argc - 2);
- return 0;
+ if (argc >= 2 && strcmp(argv[1], "runner") == 0) {
+ runner(argv, argc);
+ fail("runner returned");
}
if (argc >= 2 && strcmp(argv[1], "leak") == 0) {
#if SYZ_HAVE_LEAK_CHECK
@@ -445,14 +527,6 @@ int main(int argc, char** argv)
#endif
return 0;
}
- if (argc >= 2 && strcmp(argv[1], "setup_kcsan_filterlist") == 0) {
-#if SYZ_HAVE_KCSAN
- setup_kcsan_filterlist(argv + 2, argc - 2, true);
-#else
- fail("KCSAN is not implemented");
-#endif
- return 0;
- }
if (argc >= 2 && strcmp(argv[1], "test") == 0)
return run_tests(argc == 3 ? argv[2] : nullptr);
@@ -482,12 +556,24 @@ int main(int argc, char** argv)
// For SYZ_EXECUTOR_USES_FORK_SERVER, close(kOutFd) is invoked in the forked child,
// after the program has been received.
+ if (fcntl(kMaxSignalFd, F_GETFD) != -1) {
+ // Use random addresses for coverage filters to not collide with output_data.
+ max_signal.emplace(kMaxSignalFd, reinterpret_cast<void*>(0x110c230000ull));
+ close(kMaxSignalFd);
+ }
+ if (fcntl(kCoverFilterFd, F_GETFD) != -1) {
+ cover_filter.emplace(kCoverFilterFd, reinterpret_cast<void*>(0x110f230000ull));
+ close(kCoverFilterFd);
+ }
+
use_temporary_dir();
install_segv_handler();
setup_control_pipes();
-#if SYZ_EXECUTOR_USES_FORK_SERVER
receive_handshake();
-#else
+#if !SYZ_EXECUTOR_USES_FORK_SERVER
+ // We receive/reply handshake when fork server is disabled just to simplify runner logic.
+ // It's a bit suboptimal, but no fork server is much slower anyway.
+ reply_execute(0);
receive_execute();
#endif
if (flag_coverage) {
@@ -537,10 +623,6 @@ int main(int argc, char** argv)
#if SYZ_EXECUTOR_USES_FORK_SERVER
fprintf(stderr, "loop exited with status %d\n", status);
- // Other statuses happen when fuzzer processes manages to kill loop, e.g. with:
- // ptrace(PTRACE_SEIZE, 1, 0, 0x100040)
- if (status != kFailStatus)
- status = 0;
// If an external sandbox process wraps executor, the out pipe will be closed
// before the sandbox process exits this will make ipc package kill the sandbox.
// As the result sandbox process will exit with exit status 9 instead of the executor
@@ -557,18 +639,18 @@ int main(int argc, char** argv)
// This method can be invoked as many times as one likes - MMAP_FIXED can overwrite the previous
// mapping without any problems. The only precondition - kOutFd must not be closed.
-static void mmap_output(int size)
+static void mmap_output(uint32 size)
{
if (size <= output_size)
return;
if (size % SYZ_PAGE_SIZE != 0)
failmsg("trying to mmap output area that is not divisible by page size", "page=%d,area=%d", SYZ_PAGE_SIZE, size);
uint32* mmap_at = NULL;
- int fixed_flag = MAP_FIXED;
if (output_data == NULL) {
if (kAddressSanitizer) {
- // Don't use fixed address under ASAN b/c it may overlap with shadow.
- fixed_flag = 0;
+ // ASan allows user mappings only at some specific address ranges,
+ // so we don't randomize. But we also assume 64-bits and that we are running tests.
+ mmap_at = (uint32*)0x7f0000000000ull;
} else {
// It's the first time we map output region - generate its location.
// The output region is the only thing in executor process for which consistency matters.
@@ -587,11 +669,11 @@ static void mmap_output(int size)
mmap_at = (uint32*)((char*)(output_data) + output_size);
}
void* result = mmap(mmap_at, size - output_size,
- PROT_READ | PROT_WRITE, MAP_SHARED | fixed_flag, kOutFd, output_size);
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, kOutFd, output_size);
if (result == MAP_FAILED || (mmap_at && result != mmap_at))
failmsg("mmap of output file failed", "want %p, got %p", mmap_at, result);
if (output_data == NULL)
- output_data = static_cast<uint32*>(result);
+ output_data = static_cast<OutputData*>(result);
output_size = size;
}
@@ -609,33 +691,28 @@ void setup_control_pipes()
fail("dup2(2, 0) failed");
}
-void parse_env_flags(uint64 flags)
+void parse_env_flags(rpc::ExecEnv flags)
{
// Note: Values correspond to ordering in pkg/ipc/ipc.go, e.g. FlagSandboxNamespace
- flag_debug = flags & (1 << 0);
- flag_coverage = flags & (1 << 1);
- if (flags & (1 << 2))
- flag_sandbox_setuid = true;
- else if (flags & (1 << 3))
- flag_sandbox_namespace = true;
- else if (flags & (1 << 4))
- flag_sandbox_android = true;
- else
- flag_sandbox_none = true;
- flag_extra_coverage = flags & (1 << 5);
- flag_net_injection = flags & (1 << 6);
- flag_net_devices = flags & (1 << 7);
- flag_net_reset = flags & (1 << 8);
- flag_cgroups = flags & (1 << 9);
- flag_close_fds = flags & (1 << 10);
- flag_devlink_pci = flags & (1 << 11);
- flag_vhci_injection = flags & (1 << 12);
- flag_wifi = flags & (1 << 13);
- flag_delay_kcov_mmap = flags & (1 << 14);
- flag_nic_vf = flags & (1 << 15);
+ flag_debug = (bool)(flags & rpc::ExecEnv::Debug);
+ flag_coverage = (bool)(flags & rpc::ExecEnv::Signal);
+ flag_sandbox_none = (bool)(flags & rpc::ExecEnv::SandboxNone);
+ flag_sandbox_setuid = (bool)(flags & rpc::ExecEnv::SandboxSetuid);
+ flag_sandbox_namespace = (bool)(flags & rpc::ExecEnv::SandboxNamespace);
+ flag_sandbox_android = (bool)(flags & rpc::ExecEnv::SandboxAndroid);
+ flag_extra_coverage = (bool)(flags & rpc::ExecEnv::ExtraCover);
+ flag_net_injection = (bool)(flags & rpc::ExecEnv::EnableTun);
+ flag_net_devices = (bool)(flags & rpc::ExecEnv::EnableNetDev);
+ flag_net_reset = (bool)(flags & rpc::ExecEnv::EnableNetReset);
+ flag_cgroups = (bool)(flags & rpc::ExecEnv::EnableCgroups);
+ flag_close_fds = (bool)(flags & rpc::ExecEnv::EnableCloseFds);
+ flag_devlink_pci = (bool)(flags & rpc::ExecEnv::EnableDevlinkPCI);
+ flag_vhci_injection = (bool)(flags & rpc::ExecEnv::EnableVhciInjection);
+ flag_wifi = (bool)(flags & rpc::ExecEnv::EnableWifi);
+ flag_delay_kcov_mmap = (bool)(flags & rpc::ExecEnv::DelayKcovMmap);
+ flag_nic_vf = (bool)(flags & rpc::ExecEnv::EnableNicVF);
}
-#if SYZ_EXECUTOR_USES_FORK_SERVER
void receive_handshake()
{
handshake_req req = {};
@@ -649,40 +726,22 @@ void receive_handshake()
#endif
parse_env_flags(req.flags);
procid = req.pid;
- if (!req.cover_filter_size)
- return;
- // A random address for bitmap. Don't corrupt output_data.
- cover_filter.emplace("syz-cover-filer", reinterpret_cast<void*>(0x110f230000ull));
- std::vector<uint64> pcs(req.cover_filter_size);
- const ssize_t filter_size = req.cover_filter_size * sizeof(uint64);
- n = read(kInPipeFd, &pcs[0], filter_size);
- if (n != filter_size)
- failmsg("failed to read cover filter", "read=%zu", n);
- for (auto pc : pcs)
- cover_filter->Insert(pc);
- cover_filter->Seal();
}
-void reply_handshake()
-{
- handshake_reply reply = {};
- reply.magic = kOutMagic;
- if (write(kOutPipeFd, &reply, sizeof(reply)) != sizeof(reply))
- fail("control pipe write failed");
-}
-#endif
-
static execute_req last_execute_req;
void receive_execute()
{
execute_req& req = last_execute_req;
- if (read(kInPipeFd, &req, sizeof(req)) != (ssize_t)sizeof(req))
- fail("control pipe read failed");
+ ssize_t n = read(kInPipeFd, &req, sizeof(req));
+ if (n != (ssize_t)sizeof(req))
+ failmsg("control pipe read failed", "read=%zd want=%zd", n, sizeof(req));
if (req.magic != kInMagic)
failmsg("bad execute request magic", "magic=0x%llx", req.magic);
+ request_id = req.id;
parse_env_flags(req.env_flags);
procid = req.pid;
+ request_id = req.id;
syscall_timeout_ms = req.syscall_timeout_ms;
program_timeout_ms = req.program_timeout_ms;
slowdown_scale = req.slowdown_scale;
@@ -691,12 +750,14 @@ void receive_execute()
flag_dedup_cover = req.exec_flags & (1 << 2);
flag_comparisons = req.exec_flags & (1 << 3);
flag_threaded = req.exec_flags & (1 << 4);
+ all_call_signal = req.all_call_signal;
+ all_extra_signal = req.all_extra_signal;
debug("[%llums] exec opts: procid=%llu threaded=%d cover=%d comps=%d dedup=%d signal=%d "
- " timeouts=%llu/%llu/%llu\n",
+ " sandbox=%d/%d/%d/%d timeouts=%llu/%llu/%llu\n",
current_time_ms() - start_time_ms, procid, flag_threaded, flag_collect_cover,
- flag_comparisons, flag_dedup_cover, flag_collect_signal, syscall_timeout_ms,
- program_timeout_ms, slowdown_scale);
+ flag_comparisons, flag_dedup_cover, flag_collect_signal, flag_sandbox_none, flag_sandbox_setuid,
+ flag_sandbox_namespace, flag_sandbox_android, syscall_timeout_ms, program_timeout_ms, slowdown_scale);
if (syscall_timeout_ms == 0 || program_timeout_ms <= syscall_timeout_ms || slowdown_scale == 0)
failmsg("bad timeouts", "syscall=%llu, program=%llu, scale=%llu",
syscall_timeout_ms, program_timeout_ms, slowdown_scale);
@@ -707,13 +768,9 @@ bool cover_collection_required()
return flag_coverage && (flag_collect_signal || flag_collect_cover || flag_comparisons);
}
-void reply_execute(int status)
+void reply_execute(uint32 status)
{
- execute_reply reply = {};
- reply.magic = kOutMagic;
- reply.done = true;
- reply.status = status;
- if (write(kOutPipeFd, &reply, sizeof(reply)) != sizeof(reply))
+ if (write(kOutPipeFd, &status, sizeof(status)) != sizeof(status))
fail("control pipe write failed");
}
@@ -736,11 +793,17 @@ void execute_one()
{
in_execute_one = true;
realloc_output_data();
- output_pos = output_data;
- write_output(0); // Number of executed syscalls (updated later).
+ output_builder.emplace(output_data, output_size);
uint64 start = current_time_ms();
uint8* input_pos = input_data;
+#if GOOS_linux
+ char buf[64];
+ // Linux TASK_COMM_LEN is only 16, so the name needs to be compact.
+ snprintf(buf, sizeof(buf), "syz.%llu.%llu", procid, request_id);
+ prctl(PR_SET_NAME, buf);
+#endif
+
if (cover_collection_required()) {
if (!flag_threaded)
cover_enable(&threads[0].cov, flag_comparisons, false);
@@ -991,55 +1054,96 @@ thread_t* schedule_call(int call_index, int call_num, uint64 copyout_index, uint
}
template <typename cover_data_t>
-void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover_count_pos)
+uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov, bool all)
{
// Write out feedback signals.
// Currently it is code edges computed as xor of two subsequent basic block PCs.
+ fbb.StartVector(0, sizeof(uint64));
cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
- if (flag_collect_signal) {
- uint32 nsig = 0;
- cover_data_t prev_pc = 0;
- bool prev_filter = true;
- for (uint32 i = 0; i < cov->size; i++) {
- cover_data_t pc = cover_data[i] + cov->pc_offset;
- uint64 sig = pc;
- if (is_kernel_pc(pc) < 0)
- exitf("got bad pc: 0x%llx", (uint64)pc);
- if (use_cover_edges(pc)) {
- // Only hash the lower 12 bits so the hash is independent of any module offsets.
- const uint64 mask = (1 << 12) - 1;
- sig ^= hash(prev_pc & mask) & mask;
- }
- bool filter = coverage_filter(pc);
- // Ignore the edge only if both current and previous PCs are filtered out
- // to capture all incoming and outcoming edges into the interesting code.
- bool ignore = !filter && !prev_filter;
- prev_pc = pc;
- prev_filter = filter;
- if (ignore || dedup(sig))
- continue;
- write_output_64(sig);
- nsig++;
+ uint32 nsig = 0;
+ cover_data_t prev_pc = 0;
+ bool prev_filter = true;
+ for (uint32 i = 0; i < cov->size; i++) {
+ cover_data_t pc = cover_data[i] + cov->pc_offset;
+ if (is_kernel_pc(pc) < 0)
+ exitf("got bad pc: 0x%llx", (uint64)pc);
+ uint64 sig = pc;
+ if (use_cover_edges(pc)) {
+ // Only hash the lower 12 bits so the hash is independent of any module offsets.
+ const uint64 mask = (1 << 12) - 1;
+ sig ^= hash(prev_pc & mask) & mask;
}
- // Write out number of signals.
- *signal_count_pos = nsig;
+ bool filter = coverage_filter(pc);
+ // Ignore the edge only if both current and previous PCs are filtered out
+ // to capture all incoming and outcoming edges into the interesting code.
+ bool ignore = !filter && !prev_filter;
+ prev_pc = pc;
+ prev_filter = filter;
+ if (ignore || dedup(sig))
+ continue;
+ if (!all && max_signal && max_signal->Contains(sig))
+ continue;
+ fbb.PushElement(uint64(sig));
+ nsig++;
}
+ return fbb.EndVector(nsig);
+}
- if (flag_collect_cover) {
- // Write out real coverage (basic block PCs).
- uint32 cover_size = cov->size;
- if (flag_dedup_cover) {
- cover_data_t* end = cover_data + cover_size;
- cover_unprotect(cov);
- std::sort(cover_data, end);
- cover_size = std::unique(cover_data, end) - cover_data;
- cover_protect(cov);
- }
- // Always sent uint64 PCs.
- for (uint32 i = 0; i < cover_size; i++)
- write_output_64(cover_data[i] + cov->pc_offset);
- *cover_count_pos = cover_size;
+template <typename cover_data_t>
+uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
+{
+ uint32 cover_size = cov->size;
+ cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+ if (flag_dedup_cover) {
+ cover_data_t* end = cover_data + cover_size;
+ cover_unprotect(cov);
+ std::sort(cover_data, end);
+ cover_size = std::unique(cover_data, end) - cover_data;
+ cover_protect(cov);
+ }
+ fbb.StartVector(cover_size, sizeof(uint64));
+ for (uint32 i = 0; i < cover_size; i++)
+ fbb.PushElement(uint64(cover_data[i] + cov->pc_offset));
+ return fbb.EndVector(cover_size);
+}
+
+uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
+{
+ // Collect only the comparisons
+ uint64 ncomps = *(uint64_t*)cov->data;
+ kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
+ if ((char*)(cov_start + ncomps) > cov->data_end)
+ failmsg("too many comparisons", "ncomps=%llu", ncomps);
+ cover_unprotect(cov);
+ rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
+ rpc::ComparisonRaw* end = start;
+	// We will convert kcov_comparison_t to ComparisonRaw in place
+ // and potentially double number of elements, so ensure we have space.
+ static_assert(sizeof(kcov_comparison_t) >= 2 * sizeof(rpc::ComparisonRaw));
+ for (uint32 i = 0; i < ncomps; i++) {
+ auto [raw, swap, ok] = convert(cov_start[i]);
+ if (!ok)
+ continue;
+ *end++ = raw;
+ // Compiler marks comparisons with a const with KCOV_CMP_CONST flag.
+ // If the flag is set, then we need to export only one order of operands
+ // (because only one of them could potentially come from the input).
+ // If the flag is not set, then we export both orders as both operands
+ // could come from the input.
+ if (swap)
+ *end++ = {raw.op2(), raw.op1()};
}
+ std::sort(start, end, [](rpc::ComparisonRaw a, rpc::ComparisonRaw b) -> bool {
+ if (a.op1() != b.op1())
+ return a.op1() < b.op1();
+ return a.op2() < b.op2();
+ });
+ ncomps = std::unique(start, end, [](rpc::ComparisonRaw a, rpc::ComparisonRaw b) -> bool {
+ return a.op1() == b.op1() && a.op2() == b.op2();
+ }) -
+ start;
+ cover_protect(cov);
+ return fbb.CreateVectorOfStructs(start, ncomps).o;
}
bool coverage_filter(uint64 pc)
@@ -1109,56 +1213,67 @@ void copyout_call_results(thread_t* th)
}
}
+void write_output(int index, cover_t* cov, rpc::CallFlag flags, uint32 error, bool all_signal)
+{
+ auto& fbb = *output_builder;
+ const uint32 start_size = output_builder->GetSize();
+ (void)start_size;
+ uint32 signal_off = 0;
+ uint32 cover_off = 0;
+ uint32 comps_off = 0;
+ if (flag_comparisons) {
+ comps_off = write_comparisons(fbb, cov);
+ } else {
+ if (flag_collect_signal) {
+ if (is_kernel_64_bit)
+ signal_off = write_signal<uint64>(fbb, cov, all_signal);
+ else
+ signal_off = write_signal<uint32>(fbb, cov, all_signal);
+ }
+ if (flag_collect_cover) {
+ if (is_kernel_64_bit)
+ cover_off = write_cover<uint64>(fbb, cov);
+ else
+ cover_off = write_cover<uint32>(fbb, cov);
+ }
+ }
+
+ rpc::CallInfoRawBuilder builder(*output_builder);
+ builder.add_flags(flags);
+ builder.add_error(error);
+ if (signal_off)
+ builder.add_signal(signal_off);
+ if (cover_off)
+ builder.add_cover(cover_off);
+ if (comps_off)
+ builder.add_comps(comps_off);
+ auto off = builder.Finish();
+ uint32 slot = output_data->completed.load(std::memory_order_relaxed);
+ if (slot >= kMaxCalls)
+ failmsg("too many calls in output", "slot=%d", slot);
+ auto& call = output_data->calls[slot];
+ call.index = index;
+ call.offset = off;
+ output_data->consumed.store(output_builder->GetSize(), std::memory_order_release);
+ output_data->completed.store(slot + 1, std::memory_order_release);
+ debug_verbose("out #%u: index=%u errno=%d flags=0x%x total_size=%u\n",
+ slot + 1, index, error, static_cast<unsigned>(flags), call.data_size - start_size);
+}
+
void write_call_output(thread_t* th, bool finished)
{
uint32 reserrno = ENOSYS;
- const bool blocked = finished && th != last_scheduled;
- uint32 call_flags = call_flag_executed | (blocked ? call_flag_blocked : 0);
+ rpc::CallFlag flags = rpc::CallFlag::Executed;
+ if (finished && th != last_scheduled)
+ flags |= rpc::CallFlag::Blocked;
if (finished) {
reserrno = th->res != -1 ? 0 : th->reserrno;
- call_flags |= call_flag_finished |
- (th->fault_injected ? call_flag_fault_injected : 0);
+ flags |= rpc::CallFlag::Finished;
+ if (th->fault_injected)
+ flags |= rpc::CallFlag::FaultInjected;
}
- write_output(kOutMagic);
- write_output(th->call_index);
- write_output(th->call_num);
- write_output(reserrno);
- write_output(call_flags);
- uint32* signal_count_pos = write_output(0); // filled in later
- uint32* cover_count_pos = write_output(0); // filled in later
- uint32* comps_count_pos = write_output(0); // filled in later
-
- if (flag_comparisons) {
- // Collect only the comparisons
- uint64 ncomps = *(uint64_t*)th->cov.data;
- kcov_comparison_t* start = (kcov_comparison_t*)(th->cov.data + sizeof(uint64));
- kcov_comparison_t* end = start + ncomps;
- if ((char*)end > th->cov.data_end)
- failmsg("too many comparisons", "ncomps=%llu", ncomps);
- cover_unprotect(&th->cov);
- std::sort(start, end);
- ncomps = std::unique(start, end) - start;
- cover_protect(&th->cov);
- uint32 comps_size = 0;
- for (uint32 i = 0; i < ncomps; ++i) {
- if (start[i].ignore())
- continue;
- comps_size++;
- start[i].write();
- }
- // Write out number of comparisons.
- *comps_count_pos = comps_size;
- } else if (flag_collect_signal || flag_collect_cover) {
- if (is_kernel_64_bit)
- write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
- else
- write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos);
- }
- debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%llu\n",
- completed, th->call_index, th->call_num, reserrno, finished, blocked,
- *signal_count_pos, *cover_count_pos, *comps_count_pos);
- completed++;
- write_completed(completed);
+ bool all_signal = th->call_index < 64 ? (all_call_signal & (1ull << th->call_index)) : false;
+ write_output(th->call_index, &th->cov, flags, reserrno, all_signal);
}
void write_extra_output()
@@ -1168,22 +1283,7 @@ void write_extra_output()
cover_collect(&extra_cov);
if (!extra_cov.size)
return;
- write_output(kOutMagic);
- write_output(-1); // call index
- write_output(-1); // call num
- write_output(999); // errno
- write_output(0); // call flags
- uint32* signal_count_pos = write_output(0); // filled in later
- uint32* cover_count_pos = write_output(0); // filled in later
- write_output(0); // comps_count_pos
- if (is_kernel_64_bit)
- write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos);
- else
- write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos);
- cover_reset(&extra_cov);
- debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos);
- completed++;
- write_completed(completed);
+ write_output(-1, &extra_cov, rpc::CallFlag::NONE, 997, all_extra_signal);
}
void thread_create(thread_t* th, int id, bool need_coverage)
@@ -1518,45 +1618,42 @@ uint64 read_input(uint8** input_posp, bool peek)
return v;
}
-uint32* write_output(uint32 v)
-{
- if (output_pos < output_data || (char*)output_pos >= (char*)output_data + output_size)
- failmsg("output overflow", "pos=%p region=[%p:%p]",
- output_pos, output_data, (char*)output_data + output_size);
- *output_pos = v;
- return output_pos++;
-}
-
-uint32* write_output_64(uint64 v)
-{
- if (output_pos < output_data || (char*)(output_pos + 1) >= (char*)output_data + output_size)
- failmsg("output overflow", "pos=%p region=[%p:%p]",
- output_pos, output_data, (char*)output_data + output_size);
- *(uint64*)output_pos = v;
- output_pos += 2;
- return output_pos;
-}
-
-void write_completed(uint32 completed)
+std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp)
{
- __atomic_store_n(output_data, completed, __ATOMIC_RELEASE);
-}
+ if (cmp.type > (KCOV_CMP_CONST | KCOV_CMP_SIZE_MASK))
+ failmsg("invalid kcov comp type", "type=%llx", cmp.type);
+ uint64 arg1 = cmp.arg1;
+ uint64 arg2 = cmp.arg2;
+ // Comparisons with 0 are not interesting, fuzzer should be able to guess 0's without help.
+ if (arg1 == 0 && (arg2 == 0 || (cmp.type & KCOV_CMP_CONST)))
+ return {};
+ // Successful comparison is not interesting.
+ if (arg1 == arg2)
+ return {};
-void kcov_comparison_t::write()
-{
- if (type > (KCOV_CMP_CONST | KCOV_CMP_SIZE_MASK))
- failmsg("invalid kcov comp type", "type=%llx", type);
-
- // Write order: type arg1 arg2 pc.
- write_output((uint32)type);
-
- // KCOV converts all arguments of size x first to uintx_t and then to
- // uint64. We want to properly extend signed values, e.g we want
- // int8 c = 0xfe to be represented as 0xfffffffffffffffe.
- // Note that uint8 c = 0xfe will be represented the same way.
- // This is ok because during hints processing we will anyways try
- // the value 0x00000000000000fe.
- switch (type & KCOV_CMP_SIZE_MASK) {
+ // This can be a pointer (assuming 64-bit kernel).
+	// First of all, we want to avert the fuzzer from our output region.
+ // Without this fuzzer manages to discover and corrupt it.
+ uint64 out_start = (uint64)output_data;
+ uint64 out_end = out_start + output_size;
+ if (arg1 >= out_start && arg1 <= out_end)
+ return {};
+ if (arg2 >= out_start && arg2 <= out_end)
+ return {};
+ // Filter out kernel physical memory addresses.
+ // These are internal kernel comparisons and should not be interesting.
+ bool kptr1 = is_kernel_data(arg1) || is_kernel_pc(arg1) > 0 || arg1 == 0;
+ bool kptr2 = is_kernel_data(arg2) || is_kernel_pc(arg2) > 0 || arg2 == 0;
+ if (kptr1 && kptr2)
+ return {};
+ if (!coverage_filter(cmp.pc))
+ return {};
+
+ // KCOV converts all arguments of size x first to uintx_t and then to uint64.
+ // We want to properly extend signed values, e.g we want int8 c = 0xfe to be represented
+ // as 0xfffffffffffffffe. Note that uint8 c = 0xfe will be represented the same way.
+ // This is ok because during hints processing we will anyways try the value 0x00000000000000fe.
+ switch (cmp.type & KCOV_CMP_SIZE_MASK) {
case KCOV_CMP_SIZE1:
arg1 = (uint64)(long long)(signed char)arg1;
arg2 = (uint64)(long long)(signed char)arg2;
@@ -1570,88 +1667,10 @@ void kcov_comparison_t::write()
arg2 = (uint64)(long long)(int)arg2;
break;
}
- bool is_size_8 = (type & KCOV_CMP_SIZE_MASK) == KCOV_CMP_SIZE8;
- if (!is_size_8) {
- write_output((uint32)arg1);
- write_output((uint32)arg2);
- } else {
- write_output_64(arg1);
- write_output_64(arg2);
- }
-}
-bool kcov_comparison_t::ignore() const
-{
- // Comparisons with 0 are not interesting, fuzzer should be able to guess 0's without help.
- if (arg1 == 0 && (arg2 == 0 || (type & KCOV_CMP_CONST)))
- return true;
- // This can be a pointer (assuming 64-bit kernel).
- // First of all, we want avert fuzzer from our output region.
- // Without this fuzzer manages to discover and corrupt it.
- uint64 out_start = (uint64)output_data;
- uint64 out_end = out_start + output_size;
- if (arg1 >= out_start && arg1 <= out_end)
- return true;
- if (arg2 >= out_start && arg2 <= out_end)
- return true;
- // Filter out kernel physical memory addresses.
- // These are internal kernel comparisons and should not be interesting.
- bool kptr1 = is_kernel_data(arg1) || is_kernel_pc(arg1) > 0 || arg1 == 0;
- bool kptr2 = is_kernel_data(arg2) || is_kernel_pc(arg2) > 0 || arg2 == 0;
- if (kptr1 && kptr2)
- return true;
- return !coverage_filter(pc);
-}
-
-bool kcov_comparison_t::operator==(const struct kcov_comparison_t& other) const
-{
- // We don't check for PC equality now, because it is not used.
- return type == other.type && arg1 == other.arg1 && arg2 == other.arg2;
-}
-
-bool kcov_comparison_t::operator<(const struct kcov_comparison_t& other) const
-{
- if (type != other.type)
- return type < other.type;
- if (arg1 != other.arg1)
- return arg1 < other.arg1;
- // We don't check for PC equality now, because it is not used.
- return arg2 < other.arg2;
-}
-
-void setup_features(char** enable, int n)
-{
- // This does any one-time setup for the requested features on the machine.
- // Note: this can be called multiple times and must be idempotent.
- flag_debug = true;
- if (n != 1)
- fail("setup: more than one feature");
- char* endptr = nullptr;
- auto feature = static_cast<rpc::Feature>(strtoull(enable[0], &endptr, 10));
- if (endptr == enable[0] || (feature > rpc::Feature::ANY) ||
- __builtin_popcountll(static_cast<uint64>(feature)) > 1)
- failmsg("setup: failed to parse feature", "feature='%s'", enable[0]);
- if (feature == rpc::Feature::NONE) {
-#if SYZ_HAVE_FEATURES
- setup_sysctl();
- setup_cgroups();
-#endif
-#if SYZ_HAVE_SETUP_EXT
- // This can be defined in common_ext.h.
- setup_ext();
-#endif
- return;
- }
- for (size_t i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
- if (features[i].id == feature) {
- const char* reason = features[i].setup();
- if (reason)
- fail(reason);
- return;
- }
- }
- // Note: pkg/host knows about this error message.
- fail("feature setup is not needed");
+ // Prog package expects operands in the opposite order (first operand may come from the input,
+ // the second operand was computed in the kernel), so swap operands.
+ return {{arg2, arg1}, !(cmp.type & KCOV_CMP_CONST), true};
}
void failmsg(const char* err, const char* msg, ...)
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index 30c10a615..cb980838f 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -279,22 +279,15 @@ NORETURN void doexit_thread(int status)
}
#define SYZ_HAVE_KCSAN 1
-static void setup_kcsan_filterlist(char** frames, int nframes, bool suppress)
+static void setup_kcsan_filter(const std::vector<std::string>& frames)
{
+ if (frames.empty())
+ return;
int fd = open("/sys/kernel/debug/kcsan", O_WRONLY);
if (fd == -1)
fail("failed to open kcsan debugfs file");
-
- printf("%s KCSAN reports in functions: ",
- suppress ? "suppressing" : "only showing");
- if (!suppress)
- dprintf(fd, "whitelist\n");
- for (int i = 0; i < nframes; ++i) {
- printf("'%s' ", frames[i]);
- dprintf(fd, "!%s\n", frames[i]);
- }
- printf("\n");
-
+ for (const auto& frame : frames)
+ dprintf(fd, "!%s\n", frame.c_str());
close(fd);
}
diff --git a/executor/executor_runner.h b/executor/executor_runner.h
new file mode 100644
index 000000000..55a6c422c
--- /dev/null
+++ b/executor/executor_runner.h
@@ -0,0 +1,801 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <deque>
+#include <iomanip>
+#include <memory>
+#include <optional>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+inline std::ostream& operator<<(std::ostream& ss, const rpc::ExecRequestRawT& req)
+{
+ return ss << "id=" << req.id
+ << " flags=0x" << std::hex << static_cast<uint64>(req.flags)
+ << " env_flags=0x" << std::hex << static_cast<uint64>(req.exec_opts->env_flags())
+ << " exec_flags=0x" << std::hex << static_cast<uint64>(req.exec_opts->exec_flags())
+ << " prod_data=" << std::dec << req.prog_data.size()
+ << "\n";
+}
+
+// Proc represents one subprocess that runs tests (re-execed syz-executor with 'exec' argument).
+// The object is persistent and re-starts subprocess when it crashes.
+class Proc
+{
+public:
+ Proc(Connection& conn, const char* bin, int id, int max_signal_fd, int cover_filter_fd,
+ uint32 slowdown, uint32 syscall_timeout_ms, uint32 program_timeout_ms)
+ : conn_(conn),
+ bin_(bin),
+ id_(id),
+ max_signal_fd_(max_signal_fd),
+ cover_filter_fd_(cover_filter_fd),
+ slowdown_(slowdown),
+ syscall_timeout_ms_(syscall_timeout_ms),
+ program_timeout_ms_(program_timeout_ms),
+ req_shmem_(kMaxInput),
+ resp_shmem_(kMaxOutput),
+ resp_mem_(static_cast<OutputData*>(resp_shmem_.Mem()))
+ {
+ Start();
+ }
+
+ bool Execute(rpc::ExecRequestRawT& msg)
+ {
+ if (state_ != State::Started && state_ != State::Idle)
+ return false;
+ if (msg_)
+ fail("already have pending msg");
+ if (wait_start_)
+ wait_end_ = current_time_ms();
+ if (state_ == State::Idle &&
+ (exec_env_ != msg.exec_opts->env_flags() || sandbox_arg_ != msg.exec_opts->sandbox_arg()))
+ Restart();
+ attempts_ = 0;
+ msg_ = std::move(msg);
+ if (state_ == State::Started)
+ Handshake();
+ else
+ Execute();
+ return true;
+ }
+
+ void Arm(Select& select)
+ {
+ select.Arm(resp_pipe_);
+ select.Arm(stdout_pipe_);
+ }
+
+ void Ready(Select& select, uint64 now, bool out_of_requests)
+ {
+ if (state_ == State::Handshaking || state_ == State::Executing) {
+ // Check if the subprocess has hung.
+#if SYZ_EXECUTOR_USES_FORK_SERVER
+ // Child process has an internal timeout and protects against most hangs when
+ // fork server is enabled, so we use quite large timeout. Child process can be slow
+ // due to global locks in namespaces and other things, so let's better wait than
+ // report false misleading crashes.
+ uint64 timeout = 2 * program_timeout_ms_;
+#else
+ uint64 timeout = program_timeout_ms_;
+#endif
+ // Sandbox setup can take significant time.
+ if (state_ == State::Handshaking)
+ timeout = 60 * 1000 * slowdown_;
+ if (now > exec_start_ + timeout) {
+ Restart();
+ return;
+ }
+ }
+
+ if (select.Ready(stdout_pipe_) && !ReadOutput()) {
+#if SYZ_EXECUTOR_USES_FORK_SERVER
+ // In non-forking mode the subprocess exits after test execution
+ // and the pipe read fails with EOF, so we rely on the resp_pipe_ instead.
+ Restart();
+ return;
+#endif
+ }
+ if (select.Ready(resp_pipe_) && !ReadResponse(out_of_requests)) {
+ Restart();
+ return;
+ }
+ return;
+ }
+
+private:
+ enum State : uint8 {
+ // The process has just started.
+ Started,
+ // We sent the process env flags and waiting for handshake reply.
+ Handshaking,
+ // Handshaked and ready to execute programs.
+ Idle,
+ // Currently executing a test program.
+ Executing,
+ };
+
+ Connection& conn_;
+ const char* const bin_;
+ const int id_;
+ const int max_signal_fd_;
+ const int cover_filter_fd_;
+ const uint32 slowdown_;
+ const uint32 syscall_timeout_ms_;
+ const uint32 program_timeout_ms_;
+ State state_ = State::Started;
+ std::optional<Subprocess> process_;
+ ShmemFile req_shmem_;
+ ShmemFile resp_shmem_;
+ OutputData* resp_mem_;
+ int req_pipe_ = -1;
+ int resp_pipe_ = -1;
+ int stdout_pipe_ = -1;
+ rpc::ExecEnv exec_env_ = rpc::ExecEnv::NONE;
+ int64_t sandbox_arg_ = 0;
+ std::optional<rpc::ExecRequestRawT> msg_;
+ std::vector<uint8_t> output_;
+ size_t debug_output_pos_ = 0;
+ uint64 attempts_ = 0;
+ uint64 freshness_ = 0;
+ uint64 exec_start_ = 0;
+ uint64 wait_start_ = 0;
+ uint64 wait_end_ = 0;
+
+ friend std::ostream& operator<<(std::ostream& ss, const Proc& proc)
+ {
+ ss << "id=" << proc.id_
+ << " state=" << static_cast<int>(proc.state_)
+ << " freshness=" << proc.freshness_
+ << " attempts=" << proc.attempts_
+ << " exec_start=" << current_time_ms() - proc.exec_start_
+ << "\n";
+ if (proc.msg_)
+ ss << "\tcurrent request: " << *proc.msg_;
+ return ss;
+ }
+
+ void Restart()
+ {
+ debug("proc %d: restarting subprocess, current state %u attempts %llu\n", id_, state_, attempts_);
+ int status = process_->KillAndWait();
+ process_.reset();
+ debug("proc %d: subprocess exit status %d\n", id_, status);
+ if (++attempts_ > 20) {
+ while (ReadOutput())
+ ;
+			// Write the subprocess output first. If it contains its own SYZFAIL,
+			// we want it to be before our SYZFAIL.
+ ssize_t wrote = write(STDERR_FILENO, output_.data(), output_.size());
+ if (wrote != static_cast<ssize_t>(output_.size()))
+ fprintf(stderr, "output truncated: %zd/%zd (errno=%d)\n",
+ wrote, output_.size(), errno);
+ uint64 req_id = msg_ ? msg_->id : -1;
+ failmsg("repeatedly failed to execute the program", "proc=%d req=%lld state=%d status=%d",
+ id_, req_id, state_, status);
+ }
+ // Ignore all other errors.
+ // Without fork server executor can legitimately exit (program contains exit_group),
+ // with fork server the top process can exit with kFailStatus if it wants special handling.
+ if (status != kFailStatus)
+ status = 0;
+ if (FailCurrentRequest(status == kFailStatus)) {
+			// Read out all pending output until EOF.
+ if (IsSet(msg_->flags, rpc::RequestFlag::ReturnOutput)) {
+ while (ReadOutput())
+ ;
+ }
+ HandleCompletion(status);
+ } else if (attempts_ > 3)
+ sleep_ms(100 * attempts_);
+ Start();
+ }
+
+ bool FailCurrentRequest(bool failed)
+ {
+ if (state_ == State::Handshaking)
+ return failed && IsSet(msg_->flags, rpc::RequestFlag::ReturnError);
+ if (state_ == State::Executing)
+ return !failed || IsSet(msg_->flags, rpc::RequestFlag::ReturnError);
+ return false;
+ }
+
+ void Start()
+ {
+ state_ = State::Started;
+ freshness_ = 0;
+ int req_pipe[2];
+ if (pipe(req_pipe))
+ fail("pipe failed");
+ int resp_pipe[2];
+ if (pipe(resp_pipe))
+ fail("pipe failed");
+ int stdout_pipe[2];
+ if (pipe(stdout_pipe))
+ fail("pipe failed");
+
+ std::vector<std::pair<int, int>> fds = {
+ {req_pipe[0], STDIN_FILENO},
+ {resp_pipe[1], STDOUT_FILENO},
+ {stdout_pipe[1], STDERR_FILENO},
+ {req_shmem_.FD(), kInFd},
+ {resp_shmem_.FD(), kOutFd},
+ {max_signal_fd_, kMaxSignalFd},
+ {cover_filter_fd_, kCoverFilterFd},
+ };
+ const char* argv[] = {bin_, "exec", nullptr};
+ process_.emplace(argv, fds);
+
+ Select::Prepare(resp_pipe[0]);
+ Select::Prepare(stdout_pipe[0]);
+
+ close(req_pipe[0]);
+ close(resp_pipe[1]);
+ close(stdout_pipe[1]);
+
+ close(req_pipe_);
+ close(resp_pipe_);
+ close(stdout_pipe_);
+
+ req_pipe_ = req_pipe[1];
+ resp_pipe_ = resp_pipe[0];
+ stdout_pipe_ = stdout_pipe[0];
+
+ if (msg_)
+ Handshake();
+ }
+
+ void Handshake()
+ {
+ if (state_ != State::Started || !msg_)
+ fail("wrong handshake state");
+ debug("proc %d: handshaking to execute request %llu\n", id_, static_cast<uint64>(msg_->id));
+ state_ = State::Handshaking;
+ exec_start_ = current_time_ms();
+ exec_env_ = msg_->exec_opts->env_flags() & ~rpc::ExecEnv::ResetState;
+ sandbox_arg_ = msg_->exec_opts->sandbox_arg();
+ handshake_req req = {
+ .magic = kInMagic,
+ .flags = exec_env_,
+ .pid = static_cast<uint64>(id_),
+ .sandbox_arg = static_cast<uint64>(sandbox_arg_),
+ };
+ if (write(req_pipe_, &req, sizeof(req)) != sizeof(req)) {
+ debug("request pipe write failed (errno=%d)\n", errno);
+ Restart();
+ }
+ }
+
+ void Execute()
+ {
+ if (state_ != State::Idle || !msg_)
+ fail("wrong state for execute");
+
+ debug("proc %d: start executing request %llu\n", id_, static_cast<uint64>(msg_->id));
+
+ rpc::ExecutingMessageRawT exec;
+ exec.id = msg_->id;
+ exec.proc_id = id_;
+ exec.try_ = attempts_;
+
+ if (wait_start_) {
+ exec.wait_duration = (wait_end_ - wait_start_) * 1000 * 1000;
+ wait_end_ = wait_start_ = 0;
+ }
+
+ rpc::ExecutorMessageRawT raw;
+ raw.msg.Set(std::move(exec));
+ conn_.Send(raw);
+
+ uint64 all_call_signal = 0;
+ bool all_extra_signal = false;
+ for (int32_t call : msg_->all_signal) {
+ // This code assumes that call indices can be represented as bits in uint64 all_call_signal.
+ static_assert(kMaxCalls == 64);
+ if (call < -1 || call >= static_cast<int32_t>(kMaxCalls))
+ failmsg("bad all_signal call", "call=%d", call);
+ if (call < 0)
+ all_extra_signal = true;
+ else
+ all_call_signal |= 1ull << call;
+ }
+ memcpy(req_shmem_.Mem(), msg_->prog_data.data(), std::min(msg_->prog_data.size(), kMaxInput));
+ execute_req req{
+ .magic = kInMagic,
+ .id = static_cast<uint64>(msg_->id),
+ .env_flags = exec_env_,
+ .exec_flags = static_cast<uint64>(msg_->exec_opts->exec_flags()),
+ .pid = static_cast<uint64>(id_),
+ .syscall_timeout_ms = syscall_timeout_ms_,
+ .program_timeout_ms = program_timeout_ms_,
+ .slowdown_scale = slowdown_,
+ .all_call_signal = all_call_signal,
+ .all_extra_signal = all_extra_signal,
+ };
+ exec_start_ = current_time_ms();
+ state_ = State::Executing;
+ if (write(req_pipe_, &req, sizeof(req)) != sizeof(req)) {
+ debug("request pipe write failed (errno=%d)\n", errno);
+ Restart();
+ }
+ }
+
+ void HandleCompletion(uint32 status)
+ {
+ if (!msg_)
+ fail("don't have executed msg");
+
+ // Note: if the child process crashed during the handshake and the request has the ReturnError flag,
+ // we have not started executing the request yet.
+ uint64 elapsed = (current_time_ms() - exec_start_) * 1000 * 1000;
+ uint8* prog_data = msg_->prog_data.data();
+ input_data = prog_data;
+ uint32 num_calls = read_input(&prog_data);
+
+ int output_size = resp_mem_->size.load(std::memory_order_relaxed) ?: kMaxOutput;
+ uint32 completed = resp_mem_->completed.load(std::memory_order_relaxed);
+ completed = std::min(completed, kMaxCalls);
+ debug("handle completion: completed=%u output_size=%u\n", completed, output_size);
+ ShmemBuilder fbb(resp_mem_, output_size);
+ auto empty_call = rpc::CreateCallInfoRawDirect(fbb, rpc::CallFlag::NONE, 998);
+ std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> calls(num_calls, empty_call);
+ std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> extra;
+ for (uint32_t i = 0; i < completed; i++) {
+ const auto& call = resp_mem_->calls[i];
+ if (call.index == -1) {
+ extra.push_back(call.offset);
+ continue;
+ }
+ if (call.index < 0 || call.index >= static_cast<int>(num_calls) || call.offset.o > kMaxOutput) {
+ debug("bad call index/offset: proc=%d req=%llu call=%d/%d completed=%d offset=%u",
+ id_, static_cast<uint64>(msg_->id), call.index, num_calls,
+ completed, call.offset.o);
+ continue;
+ }
+ calls[call.index] = call.offset;
+ }
+
+ auto prog_info_off = rpc::CreateProgInfoRawDirect(fbb, &calls, &extra, 0, elapsed, freshness_++);
+
+ flatbuffers::Offset<flatbuffers::String> error_off = 0;
+ if (status == kFailStatus)
+ error_off = fbb.CreateString("process failed");
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> output_off = 0;
+ if (IsSet(msg_->flags, rpc::RequestFlag::ReturnOutput)) {
+ if (status) {
+ char tmp[128];
+ snprintf(tmp, sizeof(tmp), "\nprocess exited with status %d\n", status);
+ output_.insert(output_.end(), tmp, tmp + strlen(tmp));
+ }
+ output_off = fbb.CreateVector(output_);
+ }
+ auto exec_off = rpc::CreateExecResultRaw(fbb, msg_->id, output_off, error_off, prog_info_off);
+ auto msg_off = rpc::CreateExecutorMessageRaw(fbb, rpc::ExecutorMessagesRaw::ExecResult,
+ flatbuffers::Offset<void>(exec_off.o));
+ fbb.FinishSizePrefixed(msg_off);
+ auto data = fbb.GetBufferSpan();
+ conn_.Send(data.data(), data.size());
+
+ resp_mem_->Reset();
+ msg_.reset();
+ output_.clear();
+ debug_output_pos_ = 0;
+ state_ = State::Idle;
+#if !SYZ_EXECUTOR_USES_FORK_SERVER
+ if (process_)
+ Restart();
+#endif
+ }
+
+ bool ReadResponse(bool out_of_requests)
+ {
+ uint32 status;
+ ssize_t n = read(resp_pipe_, &status, sizeof(status));
+ if (n == 0) {
+ debug("proc %d: response pipe EOF\n", id_);
+ return false;
+ }
+ if (n != sizeof(status))
+ failmsg("proc resp pipe read failed", "n=%zd", n);
+ if (state_ == State::Handshaking) {
+ debug("proc %d: got handshake reply\n", id_);
+ state_ = State::Idle;
+ Execute();
+ } else if (state_ == State::Executing) {
+ debug("proc %d: got execute reply\n", id_);
+ HandleCompletion(status);
+ if (out_of_requests)
+ wait_start_ = current_time_ms();
+ } else {
+ debug("got data on response pipe in wrong state %d\n", state_);
+ return false;
+ }
+ return true;
+ }
+
+ bool ReadOutput()
+ {
+ const size_t kChunk = 1024;
+ output_.resize(output_.size() + kChunk);
+ ssize_t n = read(stdout_pipe_, output_.data() + output_.size() - kChunk, kChunk);
+ output_.resize(output_.size() - kChunk + std::max<ssize_t>(n, 0));
+ if (n < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ return true;
+ fail("proc stdout read failed");
+ }
+ if (n == 0) {
+ debug("proc %d: output pipe EOF\n", id_);
+ return false;
+ }
+ if (flag_debug) {
+ output_.resize(output_.size() + 1);
+ debug("proc %d: got output: %s\n", id_, output_.data() + debug_output_pos_);
+ output_.resize(output_.size() - 1);
+ debug_output_pos_ = output_.size();
+ }
+ return true;
+ }
+};
+
+// Runner manages a set of test subprocesses (Proc's), receives new test requests from the manager,
+// and dispatches them to subprocesses.
+class Runner
+{
+public:
+ Runner(Connection& conn, const char* name, const char* bin)
+ : conn_(conn),
+ name_(name)
+ {
+ size_t num_procs = Handshake();
+ int max_signal_fd = max_signal_ ? max_signal_->FD() : -1;
+ int cover_filter_fd = cover_filter_ ? cover_filter_->FD() : -1;
+ for (size_t i = 0; i < num_procs; i++)
+ procs_.emplace_back(new Proc(conn, bin, i, max_signal_fd, cover_filter_fd,
+ slowdown_, syscall_timeout_ms_, program_timeout_ms_));
+
+ for (;;)
+ Loop();
+ }
+
+private:
+ Connection& conn_;
+ const char* const name_;
+ std::optional<CoverFilter> max_signal_;
+ std::optional<CoverFilter> cover_filter_;
+ std::vector<std::unique_ptr<Proc>> procs_;
+ std::deque<rpc::ExecRequestRawT> requests_;
+ std::vector<std::string> leak_frames_;
+ uint32 slowdown_ = 0;
+ uint32 syscall_timeout_ms_ = 0;
+ uint32 program_timeout_ms_ = 0;
+
+ friend std::ostream& operator<<(std::ostream& ss, const Runner& runner)
+ {
+ ss << "procs:\n";
+ for (const auto& proc : runner.procs_)
+ ss << *proc;
+ ss << "\nqueued requests (" << runner.requests_.size() << "):\n";
+ for (const auto& req : runner.requests_)
+ ss << req;
+ return ss;
+ }
+
+ void Loop()
+ {
+ Select select;
+ select.Arm(conn_.FD());
+ for (auto& proc : procs_)
+ proc->Arm(select);
+ // Wait for ready host connection and subprocess pipes.
+ // The timeout is used to terminate hung subprocesses.
+ select.Wait(1000);
+ uint64 now = current_time_ms();
+
+ if (select.Ready(conn_.FD())) {
+ rpc::HostMessageRawT raw;
+ conn_.Recv(raw);
+ if (auto* msg = raw.msg.AsExecRequest())
+ Handle(*msg);
+ else if (auto* msg = raw.msg.AsSignalUpdate())
+ Handle(*msg);
+ else if (auto* msg = raw.msg.AsStartLeakChecks())
+ Handle(*msg);
+ else if (auto* msg = raw.msg.AsStateRequest())
+ Handle(*msg);
+ else
+ failmsg("unknown host message type", "type=%d", static_cast<int>(raw.msg.type));
+ }
+
+ for (auto& proc : procs_) {
+ proc->Ready(select, now, requests_.empty());
+ if (!requests_.empty()) {
+ if (proc->Execute(requests_.front()))
+ requests_.pop_front();
+ }
+ }
+ }
+
+ size_t Handshake()
+ {
+ rpc::ConnectRequestRawT conn_req;
+ conn_req.name = name_;
+ conn_req.arch = GOARCH;
+ conn_req.git_revision = GIT_REVISION;
+ conn_req.syz_revision = SYZ_REVISION;
+ conn_.Send(conn_req);
+
+ rpc::ConnectReplyRawT conn_reply;
+ conn_.Recv(conn_reply);
+ if (conn_reply.debug)
+ flag_debug = true;
+ debug("connected to manager: procs=%d slowdown=%d syscall_timeout=%u"
+ " program_timeout=%u features=0x%llx\n",
+ conn_reply.procs, conn_reply.slowdown, conn_reply.syscall_timeout_ms,
+ conn_reply.program_timeout_ms, static_cast<uint64>(conn_reply.features));
+ leak_frames_ = conn_reply.leak_frames;
+ slowdown_ = conn_reply.slowdown;
+ syscall_timeout_ms_ = conn_reply.syscall_timeout_ms;
+ program_timeout_ms_ = conn_reply.program_timeout_ms;
+ if (conn_reply.cover)
+ max_signal_.emplace();
+
+ rpc::InfoRequestRawT info_req;
+ info_req.files = ReadFiles(conn_reply.files);
+ info_req.globs = ReadGlobs(conn_reply.globs);
+
+ // This does any one-time setup for the requested features on the machine.
+ // Note: this can be called multiple times and must be idempotent.
+ // is_kernel_64_bit = detect_kernel_bitness();
+#if SYZ_HAVE_FEATURES
+ setup_sysctl();
+ setup_cgroups();
+#endif
+#if SYZ_HAVE_SETUP_EXT
+ // This can be defined in common_ext.h.
+ setup_ext();
+#endif
+ for (const auto& feat : features) {
+ if (!(conn_reply.features & feat.id))
+ continue;
+ debug("setting up feature %s\n", rpc::EnumNameFeature(feat.id));
+ const char* reason = feat.setup();
+ conn_reply.features &= ~feat.id;
+ std::unique_ptr<rpc::FeatureInfoRawT> res(new rpc::FeatureInfoRawT);
+ res->id = feat.id;
+ res->need_setup = true;
+ if (reason) {
+ debug("failed: %s\n", reason);
+ res->reason = reason;
+ }
+ info_req.features.push_back(std::move(res));
+ }
+ for (auto id : rpc::EnumValuesFeature()) {
+ if (!(conn_reply.features & id))
+ continue;
+ std::unique_ptr<rpc::FeatureInfoRawT> res(new rpc::FeatureInfoRawT);
+ res->id = id;
+ res->need_setup = false;
+ info_req.features.push_back(std::move(res));
+ }
+
+#if SYZ_HAVE_KCSAN
+ setup_kcsan_filter(conn_reply.race_frames);
+#endif
+
+ conn_.Send(info_req);
+
+ rpc::InfoReplyRawT info_reply;
+ conn_.Recv(info_reply);
+ debug("received info reply: covfilter=%zu\n", info_reply.cover_filter.size());
+ if (!info_reply.cover_filter.empty()) {
+ cover_filter_.emplace();
+ for (auto pc : info_reply.cover_filter)
+ cover_filter_->Insert(pc);
+ }
+
+ Select::Prepare(conn_.FD());
+ return conn_reply.procs;
+ }
+
+ void Handle(rpc::ExecRequestRawT& msg)
+ {
+ debug("recv exec request %llu: flags=0x%llx env=0x%llx exec=0x%llx size=%zu\n",
+ static_cast<uint64>(msg.id),
+ static_cast<uint64>(msg.flags),
+ static_cast<uint64>(msg.exec_opts->env_flags()),
+ static_cast<uint64>(msg.exec_opts->exec_flags()),
+ msg.prog_data.size());
+ if (IsSet(msg.flags, rpc::RequestFlag::IsBinary)) {
+ ExecuteBinary(msg);
+ return;
+ }
+ for (auto& proc : procs_) {
+ if (proc->Execute(msg))
+ return;
+ }
+ requests_.push_back(std::move(msg));
+ }
+
+ void Handle(const rpc::SignalUpdateRawT& msg)
+ {
+ debug("recv signal update: new=%zu drop=%zu\n", msg.new_max.size(), msg.drop_max.size());
+ if (!max_signal_)
+ fail("signal update when no signal filter installed");
+ for (auto pc : msg.new_max)
+ max_signal_->Insert(pc);
+ for (auto pc : msg.drop_max)
+ max_signal_->Remove(pc);
+ }
+
+ void Handle(const rpc::StartLeakChecksRawT& msg)
+ {
+ // TODO: repair leak checking (#4728).
+ debug("recv start leak checks\n");
+ }
+
+ void Handle(const rpc::StateRequestRawT& msg)
+ {
+ // Debug request about our internal state.
+ std::ostringstream ss;
+ ss << *this;
+ const std::string& str = ss.str();
+ rpc::StateResultRawT res;
+ res.data.insert(res.data.begin(), str.data(), str.data() + str.size());
+ rpc::ExecutorMessageRawT raw;
+ raw.msg.Set(std::move(res));
+ conn_.Send(raw);
+ }
+
+ void ExecuteBinary(rpc::ExecRequestRawT& msg)
+ {
+ rpc::ExecutingMessageRawT exec;
+ exec.id = msg.id;
+ rpc::ExecutorMessageRawT raw;
+ raw.msg.Set(std::move(exec));
+ conn_.Send(raw);
+
+ char dir_template[] = "syz-bin-dirXXXXXX";
+ char* dir = mkdtemp(dir_template);
+ if (dir == nullptr)
+ fail("mkdtemp failed");
+ if (chmod(dir, 0777))
+ fail("chmod failed");
+ auto [err, output] = ExecuteBinaryImpl(msg, dir);
+ if (!err.empty()) {
+ char tmp[64];
+ snprintf(tmp, sizeof(tmp), " (errno %d: %s)", errno, strerror(errno));
+ err += tmp;
+ }
+ remove_dir(dir);
+ rpc::ExecResultRawT res;
+ res.id = msg.id;
+ res.error = std::move(err);
+ res.output = std::move(output);
+ raw.msg.Set(std::move(res));
+ conn_.Send(raw);
+ }
+
+ std::tuple<std::string, std::vector<uint8_t>> ExecuteBinaryImpl(rpc::ExecRequestRawT& msg, const char* dir)
+ {
+ // For simplicity we just wait for binary tests to complete blocking everything else.
+ std::string file = std::string(dir) + "/syz-executor";
+ int fd = open(file.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT, 0755);
+ if (fd == -1)
+ return {"binary file creation failed", {}};
+ ssize_t wrote = write(fd, msg.prog_data.data(), msg.prog_data.size());
+ close(fd);
+ if (wrote != static_cast<ssize_t>(msg.prog_data.size()))
+ return {"binary file write failed", {}};
+
+ int stdin_pipe[2];
+ if (pipe(stdin_pipe))
+ fail("pipe failed");
+ int stdout_pipe[2];
+ if (pipe(stdout_pipe))
+ fail("pipe failed");
+
+ const char* argv[] = {file.c_str(), nullptr};
+ std::vector<std::pair<int, int>> fds = {
+ {stdin_pipe[0], STDIN_FILENO},
+ {stdout_pipe[1], STDOUT_FILENO},
+ {stdout_pipe[1], STDERR_FILENO},
+ };
+ Subprocess process(argv, fds);
+
+ close(stdin_pipe[0]);
+ close(stdout_pipe[1]);
+
+ int status = process.WaitAndKill(5 * program_timeout_ms_);
+
+ std::vector<uint8_t> output;
+ for (;;) {
+ const size_t kChunk = 1024;
+ output.resize(output.size() + kChunk);
+ ssize_t n = read(stdout_pipe[0], output.data() + output.size() - kChunk, kChunk);
+ output.resize(output.size() - kChunk + std::max<ssize_t>(n, 0));
+ if (n <= 0)
+ break;
+ }
+ close(stdin_pipe[1]);
+ close(stdout_pipe[0]);
+
+ return {status == kFailStatus ? "process failed" : "", std::move(output)};
+ }
+};
+
+static void SigintHandler(int sig)
+{
+ // GCE VM preemption is signalled as SIGINT, notify syz-manager.
+ exitf("SYZ-EXECUTOR: PREEMPTED");
+}
+
+static void SigchldHandler(int sig)
+{
+ // We need this handler only so that blocking syscalls get preempted (interrupted with EINTR).
+}
+
+static void SigsegvHandler(int sig, siginfo_t* info, void* ucontext)
+{
+ // Print minimal debugging info we can extract reasonably easy.
+ auto& mctx = static_cast<ucontext_t*>(ucontext)->uc_mcontext;
+ (void)mctx;
+ uintptr_t pc = 0xdeadbeef;
+#if GOOS_linux
+#if GOARCH_amd64
+ pc = mctx.gregs[REG_RIP];
+#elif GOARCH_arm64
+ pc = mctx.pc;
+#endif
+#endif
+ // Print the current function PC so that it's possible to map the failing PC
+ // to a symbol in the binary offline (we usually compile as PIE).
+ failmsg("SIGSEGV", "sig:%d handler:%p pc:%p addr:%p",
+ sig, SigsegvHandler, info->si_addr, reinterpret_cast<void*>(pc));
+}
+
+static void runner(char** argv, int argc)
+{
+ if (argc != 5)
+ fail("usage: syz-executor runner <name> <manager-addr> <manager-port>");
+ const char* const name = argv[2];
+ const char* const manager_addr = argv[3];
+ const char* const manager_port = argv[4];
+
+ struct rlimit rlim;
+ rlim.rlim_cur = rlim.rlim_max = kFdLimit;
+ if (setrlimit(RLIMIT_NOFILE, &rlim))
+ fail("setrlimit(RLIMIT_NOFILE) failed");
+
+ // Ignore all signals we are not interested in.
+ // In particular we want to ignore SIGPIPE, but also everything else since
+ // test processes manage to send random signals using tracepoints with bpf programs.
+ // This is not bullet-proof protection, but it won't do any harm either.
+ for (int sig = 0; sig <= 64; sig++)
+ signal(sig, SIG_IGN);
+ if (signal(SIGINT, SigintHandler) == SIG_ERR)
+ fail("signal(SIGINT) failed");
+ if (signal(SIGTERM, SigintHandler) == SIG_ERR)
+ fail("signal(SIGTERM) failed");
+ if (signal(SIGCHLD, SigchldHandler) == SIG_ERR)
+ fail("signal(SIGCHLD) failed");
+ struct sigaction act = {};
+ act.sa_flags = SA_SIGINFO;
+ act.sa_sigaction = SigsegvHandler;
+ if (sigaction(SIGSEGV, &act, nullptr))
+ fail("signal(SIGSEGV) failed");
+ if (sigaction(SIGBUS, &act, nullptr))
+ fail("signal(SIGBUS) failed");
+
+ Connection conn(manager_addr, manager_port);
+ Runner(conn, name, argv[0]);
+}
diff --git a/executor/files.h b/executor/files.h
new file mode 100644
index 000000000..f952a07dc
--- /dev/null
+++ b/executor/files.h
@@ -0,0 +1,85 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <glob.h>
+#include <string.h>
+#include <unistd.h>
+
+static std::vector<std::string> Glob(const std::string& pattern)
+{
+ glob_t buf = {};
+ int res = glob(pattern.c_str(), GLOB_MARK | GLOB_NOSORT, nullptr, &buf);
+ if (res != 0 && res != GLOB_NOMATCH)
+ failmsg("glob failed", "pattern='%s' res=%d", pattern.c_str(), res);
+ std::vector<std::string> files;
+ for (size_t i = 0; i < buf.gl_pathc; i++) {
+ const char* file = buf.gl_pathv[i];
+ if (file[strlen(file) - 1] == '/')
+ continue;
+ files.push_back(file);
+ }
+ globfree(&buf);
+ return files;
+}
+
+static std::unique_ptr<rpc::FileInfoRawT> ReadFile(const std::string& file)
+{
+ auto info = std::make_unique<rpc::FileInfoRawT>();
+ info->name = file;
+ int fd = open(file.c_str(), O_RDONLY);
+ if (fd == -1) {
+ info->exists = errno != EEXIST && errno != ENOENT;
+ info->error = strerror(errno);
+ } else {
+ info->exists = true;
+ for (;;) {
+ constexpr size_t kChunk = 4 << 10;
+ info->data.resize(info->data.size() + kChunk);
+ ssize_t n = read(fd, info->data.data() + info->data.size() - kChunk, kChunk);
+ if (n < 0) {
+ info->error = strerror(errno);
+ break;
+ }
+ info->data.resize(info->data.size() - kChunk + n);
+ if (n == 0)
+ break;
+ }
+ close(fd);
+ }
+ debug("reading file %s: size=%zu exists=%d error=%s\n",
+ info->name.c_str(), info->data.size(), info->exists, info->error.c_str());
+ return info;
+}
+
+static std::vector<std::unique_ptr<rpc::FileInfoRawT>> ReadFiles(const std::vector<std::string>& files)
+{
+ std::vector<std::unique_ptr<rpc::FileInfoRawT>> results;
+ for (const auto& file : files) {
+ if (!strchr(file.c_str(), '*')) {
+ results.push_back(ReadFile(file));
+ continue;
+ }
+ for (const auto& match : Glob(file))
+ results.push_back(ReadFile(match));
+ }
+ return results;
+}
+
+static std::vector<std::unique_ptr<rpc::GlobInfoRawT>> ReadGlobs(const std::vector<std::string>& patterns)
+{
+ std::vector<std::unique_ptr<rpc::GlobInfoRawT>> results;
+ for (const auto& pattern : patterns) {
+ auto info = std::make_unique<rpc::GlobInfoRawT>();
+ info->name = pattern;
+ info->files = Glob(pattern);
+ results.push_back(std::move(info));
+ }
+ return results;
+}
diff --git a/executor/shmem.h b/executor/shmem.h
index ab9d17300..b7722ff99 100644
--- a/executor/shmem.h
+++ b/executor/shmem.h
@@ -3,6 +3,7 @@
#include <fcntl.h>
#include <stddef.h>
+#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
@@ -10,20 +11,22 @@
class ShmemFile
{
public:
- // Maps shared memory region of size 'size' from a new file 'file', preferably at the address 'preferred'.
- ShmemFile(const char* file, void* preferred, size_t size)
+ // Maps shared memory region of size 'size' from a new temp file.
+ ShmemFile(size_t size)
{
- fd_ = open(file, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ char file_name[] = "syz.XXXXXX";
+ fd_ = mkstemp(file_name);
if (fd_ == -1)
- failmsg("shmem open failed", "file=%s", file);
- if (fallocate(fd_, 0, 0, size))
+ failmsg("shmem open failed", "file=%s", file_name);
+ if (posix_fallocate(fd_, 0, size))
failmsg("shmem fallocate failed", "size=%zu", size);
- Mmap(fd_, preferred, size, true);
- if (unlink(file))
+ Mmap(fd_, nullptr, size, true);
+ if (unlink(file_name))
fail("shmem unlink failed");
}
- // Maps shared memory region from the file 'fd' in read/write or write-only mode.
+ // Maps shared memory region from the file 'fd' in read/write or write-only mode,
+ // preferably at the address 'preferred'.
ShmemFile(int fd, void* preferred, size_t size, bool write)
{
Mmap(fd, preferred, size, write);
diff --git a/executor/style_test.go b/executor/style_test.go
index e41674d1c..c5f7177b2 100644
--- a/executor/style_test.go
+++ b/executor/style_test.go
@@ -90,9 +90,10 @@ if (foo) {
// This detects C89-style variable declarations in the beginning of block in a best-effort manner.
// Struct fields look exactly as C89 variable declarations, to filter them out we look for "{"
// at the beginning of the line.
+ // nolint: lll
pattern: `
{[^{]*
-\s+((unsigned )?[a-zA-Z][a-zA-Z0-9_]+\s*\*?|(struct )?[a-zA-Z][a-zA-Z0-9_]+\*)\s+([a-zA-Z][a-zA-Z0-9_]*(,\s*)?)+;
+\s+((unsigned )?([A-Z][A-Z0-9_]+|[a-z][a-z0-9_]+)\s*\*?|(struct )?[a-zA-Z][a-zA-Z0-9_]+\*)\s+([a-zA-Z][a-zA-Z0-9_]*(,\s*)?)+;
`,
suppression: `return |goto |va_list |pthread_|zx_`,
message: "Don't use C89 var declarations. Declare vars where they are needed and combine with initialization",
@@ -155,7 +156,7 @@ if (foo) {
re := regexp.MustCompile(check.pattern)
for _, test := range check.tests {
if !re.MatchString(test) {
- t.Fatalf("patter %q does not match test %q", check.pattern, test)
+ t.Fatalf("pattern %q does not match test %q", check.pattern, test)
}
}
}
diff --git a/executor/subprocess.h b/executor/subprocess.h
new file mode 100644
index 000000000..ef4bd9656
--- /dev/null
+++ b/executor/subprocess.h
@@ -0,0 +1,129 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+#include <spawn.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <vector>
+
+// Subprocess allows starting and waiting for a subprocess.
+class Subprocess
+{
+public:
+ Subprocess(const char** argv, const std::vector<std::pair<int, int>>& fds)
+ {
+ posix_spawn_file_actions_t actions;
+ if (posix_spawn_file_actions_init(&actions))
+ fail("posix_spawn_file_actions_init failed");
+ int max_fd = 0;
+ for (auto pair : fds) {
+ max_fd = std::max(max_fd, pair.second);
+ if (pair.first != -1) {
+ if (posix_spawn_file_actions_adddup2(&actions, pair.first, pair.second))
+ fail("posix_spawn_file_actions_adddup2 failed");
+ } else {
+ if (posix_spawn_file_actions_addclose(&actions, pair.second))
+ fail("posix_spawn_file_actions_addclose failed");
+ }
+ }
+ for (int i = max_fd + 1; i < kFdLimit; i++) {
+ if (posix_spawn_file_actions_addclose(&actions, i))
+ fail("posix_spawn_file_actions_addclose failed");
+ }
+
+ posix_spawnattr_t attr;
+ if (posix_spawnattr_init(&attr))
+ fail("posix_spawnattr_init failed");
+ // Create new process group so that we can kill all processes in the group.
+ if (posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETPGROUP))
+ fail("posix_spawnattr_setflags failed");
+
+ const char* child_envp[] = {
+ // Tell ASAN to not mess with our NONFAILING.
+ "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1",
+ // Disable rseq since we don't use it and we want to [ab]use it ourselves for kernel testing.
+ "GLIBC_TUNABLES=glibc.pthread.rseq=0",
+ nullptr};
+
+ if (posix_spawn(&pid_, argv[0], &actions, &attr,
+ const_cast<char**>(argv), const_cast<char**>(child_envp)))
+ fail("posix_spawn failed");
+
+ if (posix_spawn_file_actions_destroy(&actions))
+ fail("posix_spawn_file_actions_destroy failed");
+ if (posix_spawnattr_destroy(&attr))
+ fail("posix_spawnattr_destroy failed");
+ }
+
+ ~Subprocess()
+ {
+ if (pid_)
+ KillAndWait();
+ }
+
+ int KillAndWait()
+ {
+ if (!pid_)
+ fail("subprocess hasn't started or already waited");
+ kill(-pid_, SIGKILL);
+ kill(pid_, SIGKILL);
+ int pid = 0;
+ int wstatus = 0;
+ do
+ pid = waitpid(pid_, &wstatus, WAIT_FLAGS);
+ while (pid == -1 && errno == EINTR);
+ if (pid != pid_)
+ failmsg("child wait failed", "pid_=%d pid=%d", pid_, pid);
+ if (WIFSTOPPED(wstatus))
+ failmsg("child stopped", "status=%d", wstatus);
+ pid_ = 0;
+ return ExitStatus(wstatus);
+ }
+
+ int WaitAndKill(uint64 timeout_ms)
+ {
+ if (!pid_)
+ fail("subprocess hasn't started or already waited");
+ uint64 start = current_time_ms();
+ int wstatus = 0;
+ for (;;) {
+ sleep_ms(10);
+ if (waitpid(pid_, &wstatus, WNOHANG | WAIT_FLAGS) == pid_)
+ break;
+ if (current_time_ms() - start > timeout_ms) {
+ kill(-pid_, SIGKILL);
+ kill(pid_, SIGKILL);
+ }
+ }
+ pid_ = 0;
+ return ExitStatus(wstatus);
+ }
+
+private:
+ int pid_ = 0;
+
+ static int ExitStatus(int wstatus)
+ {
+ if (WIFEXITED(wstatus))
+ return WEXITSTATUS(wstatus);
+ if (WIFSIGNALED(wstatus)) {
+ // Map signal numbers to some reasonable exit statuses.
+ // We only log them and compare to kFailStatus, so ensure it's not kFailStatus
+ // and not 0, otherwise return the signal as is (e.g. exit status 11 is SIGSEGV).
+ switch (WTERMSIG(wstatus)) {
+ case kFailStatus:
+ return kFailStatus - 1;
+ case 0:
+ return kFailStatus - 2;
+ default:
+ return WTERMSIG(wstatus);
+ }
+ }
+ // This may be possible in WIFSTOPPED case for C programs.
+ return kFailStatus - 3;
+ }
+
+ Subprocess(const Subprocess&) = delete;
+ Subprocess& operator=(const Subprocess&) = delete;
+};
diff --git a/executor/test.h b/executor/test.h
index c49459033..e6fabf63f 100644
--- a/executor/test.h
+++ b/executor/test.h
@@ -204,7 +204,7 @@ static int test_csum_inet_acc()
static int test_cover_filter()
{
char* tmp = tempnam(nullptr, "syz-test-cover-filter");
- CoverFilter filter(tmp);
+ CoverFilter filter;
CoverFilter child(filter.FD());
free(tmp);
diff --git a/pkg/csource/options.go b/pkg/csource/options.go
index ba6dcfbed..ba44dd021 100644
--- a/pkg/csource/options.go
+++ b/pkg/csource/options.go
@@ -11,6 +11,7 @@ import (
"sort"
"strings"
+ "github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/sys/targets"
)
@@ -364,3 +365,60 @@ var ExecutorOpts = Options{
Sandbox: "none",
UseTmpDir: true,
}
+
+func FeaturesToFlags(features flatrpc.Feature, manual Features) flatrpc.ExecEnv {
+ for feat := range flatrpc.EnumNamesFeature {
+ opt := FlatRPCFeaturesToCSource[feat]
+ if opt != "" && manual != nil && !manual[opt].Enabled {
+ features &= ^feat
+ }
+ }
+ var flags flatrpc.ExecEnv
+ if manual == nil || manual["net_reset"].Enabled {
+ flags |= flatrpc.ExecEnvEnableNetReset
+ }
+ if manual == nil || manual["cgroups"].Enabled {
+ flags |= flatrpc.ExecEnvEnableCgroups
+ }
+ if manual == nil || manual["close_fds"].Enabled {
+ flags |= flatrpc.ExecEnvEnableCloseFds
+ }
+ if features&flatrpc.FeatureExtraCoverage != 0 {
+ flags |= flatrpc.ExecEnvExtraCover
+ }
+ if features&flatrpc.FeatureDelayKcovMmap != 0 {
+ flags |= flatrpc.ExecEnvDelayKcovMmap
+ }
+ if features&flatrpc.FeatureNetInjection != 0 {
+ flags |= flatrpc.ExecEnvEnableTun
+ }
+ if features&flatrpc.FeatureNetDevices != 0 {
+ flags |= flatrpc.ExecEnvEnableNetDev
+ }
+ if features&flatrpc.FeatureDevlinkPCI != 0 {
+ flags |= flatrpc.ExecEnvEnableDevlinkPCI
+ }
+ if features&flatrpc.FeatureNicVF != 0 {
+ flags |= flatrpc.ExecEnvEnableNicVF
+ }
+ if features&flatrpc.FeatureVhciInjection != 0 {
+ flags |= flatrpc.ExecEnvEnableVhciInjection
+ }
+ if features&flatrpc.FeatureWifiEmulation != 0 {
+ flags |= flatrpc.ExecEnvEnableWifi
+ }
+ return flags
+}
+
+var FlatRPCFeaturesToCSource = map[flatrpc.Feature]string{
+ flatrpc.FeatureNetInjection: "tun",
+ flatrpc.FeatureNetDevices: "net_dev",
+ flatrpc.FeatureDevlinkPCI: "devlink_pci",
+ flatrpc.FeatureNicVF: "nic_vf",
+ flatrpc.FeatureVhciInjection: "vhci",
+ flatrpc.FeatureWifiEmulation: "wifi",
+ flatrpc.FeatureUSBEmulation: "usb",
+ flatrpc.FeatureBinFmtMisc: "binfmt_misc",
+ flatrpc.FeatureLRWPANEmulation: "ieee802154",
+ flatrpc.FeatureSwap: "swap",
+}
diff --git a/pkg/flatrpc/conn.go b/pkg/flatrpc/conn.go
index ba028fe62..9d8d1ce59 100644
--- a/pkg/flatrpc/conn.go
+++ b/pkg/flatrpc/conn.go
@@ -8,10 +8,9 @@ import (
"fmt"
"io"
"net"
- "os"
+ "reflect"
"slices"
"sync"
- "time"
flatbuffers "github.com/google/flatbuffers/go"
"github.com/google/syzkaller/pkg/log"
@@ -50,7 +49,7 @@ func ListenAndServe(addr string, handler func(*Conn)) (*Serv, error) {
continue
}
go func() {
- c := newConn(conn)
+ c := NewConn(conn)
defer c.Close()
handler(c)
}()
@@ -77,22 +76,7 @@ type Conn struct {
lastMsg int
}
-func Dial(addr string, timeScale time.Duration) (*Conn, error) {
- var conn net.Conn
- var err error
- if addr == "stdin" {
- // This is used by vm/gvisor which passes us a unix socket connection in stdin.
- conn, err = net.FileConn(os.Stdin)
- } else {
- conn, err = net.DialTimeout("tcp", addr, time.Minute*timeScale)
- }
- if err != nil {
- return nil, err
- }
- return newConn(conn), nil
-}
-
-func newConn(conn net.Conn) *Conn {
+func NewConn(conn net.Conn) *Conn {
return &Conn{
conn: conn,
builder: flatbuffers.NewBuilder(0),
@@ -125,14 +109,31 @@ func Send[T sendMsg](c *Conn, msg T) error {
return nil
}
-// Recv received an RPC message.
-// The type T is supposed to be a normal flatbuffers type (not ending with T, e.g. ConnectRequest).
+// Recv receives an RPC message.
+// The type T is supposed to be a pointer to a normal flatbuffers type (not ending with T, e.g. *ConnectRequestRaw).
// Receiving should be done from a single goroutine, the received message is valid
// only until the next Recv call (messages share the same underlying receive buffer).
-func Recv[T any, PT interface {
- *T
+func Recv[Raw interface {
+ UnPack() *T
flatbuffers.FlatBuffer
-}](c *Conn) (*T, error) {
+}, T any](c *Conn) (res *T, err0 error) {
+ defer func() {
+ if err1 := recover(); err1 != nil {
+ if err2, ok := err1.(error); ok {
+ err0 = err2
+ } else {
+ err0 = fmt.Errorf("%v", err1)
+ }
+ }
+ }()
+ raw, err := RecvRaw[Raw](c)
+ if err != nil {
+ return nil, err
+ }
+ return raw.UnPack(), nil
+}
+
+func RecvRaw[T flatbuffers.FlatBuffer](c *Conn) (T, error) {
// First, discard the previous message.
// For simplicity we copy any data from the next message to the beginning of the buffer.
// Theoretically we could something more efficient, e.g. don't copy if we already
@@ -146,21 +147,24 @@ func Recv[T any, PT interface {
sizePrefixSize = flatbuffers.SizeUint32
maxMessageSize = 64 << 20
)
- msg := PT(new(T))
+ var msg T
// Then, receive at least the size prefix (4 bytes).
// And then the full message, if we have not got it yet.
if err := c.recv(sizePrefixSize); err != nil {
- return nil, fmt.Errorf("failed to recv %T: %w", msg, err)
+ return msg, fmt.Errorf("failed to recv %T: %w", msg, err)
}
size := int(flatbuffers.GetSizePrefix(c.data, 0))
if size > maxMessageSize {
- return nil, fmt.Errorf("message %T has too large size %v", msg, size)
+ return msg, fmt.Errorf("message %T has too large size %v", msg, size)
}
c.lastMsg = sizePrefixSize + size
if err := c.recv(c.lastMsg); err != nil {
- return nil, fmt.Errorf("failed to recv %T: %w", msg, err)
+ return msg, fmt.Errorf("failed to recv %T: %w", msg, err)
}
statRecv.Add(c.lastMsg)
+ // This probably can't be expressed without reflect as "new U" where U is *T;
+ // it does not seem to be expressible via generic constraints.
+ msg = reflect.New(reflect.TypeOf(msg).Elem()).Interface().(T)
data := c.data[sizePrefixSize:c.lastMsg]
msg.Init(data, flatbuffers.GetUOffsetT(data))
return msg, nil
diff --git a/pkg/flatrpc/conn_test.go b/pkg/flatrpc/conn_test.go
index 38b9e6980..a6f7f23f9 100644
--- a/pkg/flatrpc/conn_test.go
+++ b/pkg/flatrpc/conn_test.go
@@ -4,7 +4,11 @@
package flatrpc
import (
+ "net"
+ "os"
+ "syscall"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
)
@@ -40,22 +44,22 @@ func TestConn(t *testing.T) {
}()
serv, err := ListenAndServe(":0", func(c *Conn) {
defer close(done)
- connectReqGot, err := Recv[ConnectRequestRaw](c)
+ connectReqGot, err := Recv[*ConnectRequestRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, connectReq, connectReqGot.UnPack())
+ assert.Equal(t, connectReq, connectReqGot)
if err := Send(c, connectReply); err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
- got, err := Recv[ExecutorMessageRaw](c)
+ got, err := Recv[*ExecutorMessageRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, executorMsg, got.UnPack())
+ assert.Equal(t, executorMsg, got)
}
})
if err != nil {
@@ -63,21 +67,18 @@ func TestConn(t *testing.T) {
}
defer serv.Close()
- c, err := Dial(serv.Addr.String(), 1)
- if err != nil {
- t.Fatal(err)
- }
+ c := dial(t, serv.Addr.String())
defer c.Close()
if err := Send(c, connectReq); err != nil {
t.Fatal(err)
}
- connectReplyGot, err := Recv[ConnectReplyRaw](c)
+ connectReplyGot, err := Recv[*ConnectReplyRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, connectReply, connectReplyGot.UnPack())
+ assert.Equal(t, connectReply, connectReplyGot)
for i := 0; i < 10; i++ {
if err := Send(c, executorMsg); err != nil {
@@ -108,7 +109,7 @@ func BenchmarkConn(b *testing.B) {
serv, err := ListenAndServe(":0", func(c *Conn) {
defer close(done)
for i := 0; i < b.N; i++ {
- _, err := Recv[ConnectRequestRaw](c)
+ _, err := Recv[*ConnectRequestRaw](c)
if err != nil {
b.Fatal(err)
}
@@ -122,10 +123,7 @@ func BenchmarkConn(b *testing.B) {
}
defer serv.Close()
- c, err := Dial(serv.Addr.String(), 1)
- if err != nil {
- b.Fatal(err)
- }
+ c := dial(b, serv.Addr.String())
defer c.Close()
b.ReportAllocs()
@@ -134,9 +132,46 @@ func BenchmarkConn(b *testing.B) {
if err := Send(c, connectReq); err != nil {
b.Fatal(err)
}
- _, err := Recv[ConnectReplyRaw](c)
+ _, err := Recv[*ConnectReplyRaw](c)
if err != nil {
b.Fatal(err)
}
}
}
+
+func dial(t testing.TB, addr string) *Conn {
+ conn, err := net.DialTimeout("tcp", addr, time.Minute)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return NewConn(conn)
+}
+
+func FuzzRecv(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ data = data[:min(len(data), 1<<10)]
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ w := os.NewFile(uintptr(fds[0]), "")
+ r := os.NewFile(uintptr(fds[1]), "")
+ defer w.Close()
+ defer r.Close()
+ if _, err := w.Write(data); err != nil {
+ t.Fatal(err)
+ }
+ w.Close()
+ n, err := net.FileConn(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := NewConn(n)
+ for {
+ _, err := Recv[*ExecutorMessageRaw](c)
+ if err != nil {
+ break
+ }
+ }
+ })
+}
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 78adc8ec5..b51ea0c70 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -8,6 +8,7 @@ enum Feature : uint64 (bit_flags) {
Comparisons,
ExtraCoverage,
DelayKcovMmap,
+ SandboxNone,
SandboxSetuid,
SandboxNamespace,
SandboxAndroid,
@@ -35,8 +36,11 @@ table ConnectRequestRaw {
table ConnectReplyRaw {
debug :bool;
+ cover :bool;
procs :int32;
slowdown :int32;
+ syscall_timeout_ms :int32;
+ program_timeout_ms :int32;
leak_frames :[string];
race_frames :[string];
// Fuzzer sets up these features and returns results in InfoRequest.features.
@@ -79,7 +83,8 @@ table FeatureInfoRaw {
union HostMessagesRaw {
ExecRequest :ExecRequestRaw,
SignalUpdate :SignalUpdateRaw,
- StartLeakChecks :StartLeakChecksRaw
+ StartLeakChecks :StartLeakChecksRaw,
+ StateRequest :StateRequestRaw,
}
table HostMessageRaw {
@@ -90,6 +95,7 @@ table HostMessageRaw {
union ExecutorMessagesRaw {
ExecResult :ExecResultRaw,
Executing :ExecutingMessageRaw,
+ State :StateResultRaw,
}
table ExecutorMessageRaw {
@@ -100,8 +106,6 @@ enum RequestFlag : uint64 (bit_flags) {
// If set, prog_data contains compiled executable binary
// that needs to be written to disk and executed.
IsBinary,
- // If set, fully reset executor state befor executing the test.
- ResetState,
// If set, collect program output and return in output field.
ReturnOutput,
// If set, don't fail on program failures, instead return the error in error field.
@@ -112,6 +116,8 @@ enum RequestFlag : uint64 (bit_flags) {
enum ExecEnv : uint64 (bit_flags) {
Debug, // debug output from executor
Signal, // collect feedback signals (coverage)
+ ResetState, // fully reset executor state before executing the test
+ SandboxNone, // minimal sandboxing
SandboxSetuid, // impersonate nobody user
SandboxNamespace, // use namespaces for sandboxing
SandboxAndroid, // use Android sandboxing for the untrusted_app domain
@@ -150,12 +156,8 @@ table ExecRequestRaw {
prog_data :[uint8];
exec_opts :ExecOptsRaw;
flags :RequestFlag;
- signal_filter :[uint64];
- signal_filter_call :int32;
// Return all signal for these calls.
all_signal :[int32];
- // Repeat the program that many times (0 means 1).
- repeat :int32;
}
table SignalUpdateRaw {
@@ -169,6 +171,9 @@ table SignalUpdateRaw {
table StartLeakChecksRaw {
}
+table StateRequestRaw {
+}
+
// Notification from the executor that it started executing the program 'id'.
// We want this request to be as small and as fast as possible b/c we need it
// to reach the host (or at least leave the VM) before the VM crashes
@@ -209,6 +214,8 @@ struct ComparisonRaw {
table ProgInfoRaw {
calls :[CallInfoRaw];
// Contains signal and cover collected from background threads.
+ // The raw version is exported by executor, and then merged into extra on the host.
+ extra_raw :[CallInfoRaw];
extra :CallInfoRaw;
// Total execution time of the program in nanoseconds.
elapsed :uint64;
@@ -223,3 +230,7 @@ table ExecResultRaw {
error :string;
info :ProgInfoRaw;
}
+
+table StateResultRaw {
+ data :[uint8];
+}
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index b561334fe..28c28ca8e 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -15,22 +15,23 @@ const (
FeatureComparisons Feature = 2
FeatureExtraCoverage Feature = 4
FeatureDelayKcovMmap Feature = 8
- FeatureSandboxSetuid Feature = 16
- FeatureSandboxNamespace Feature = 32
- FeatureSandboxAndroid Feature = 64
- FeatureFault Feature = 128
- FeatureLeak Feature = 256
- FeatureNetInjection Feature = 512
- FeatureNetDevices Feature = 1024
- FeatureKCSAN Feature = 2048
- FeatureDevlinkPCI Feature = 4096
- FeatureNicVF Feature = 8192
- FeatureUSBEmulation Feature = 16384
- FeatureVhciInjection Feature = 32768
- FeatureWifiEmulation Feature = 65536
- FeatureLRWPANEmulation Feature = 131072
- FeatureBinFmtMisc Feature = 262144
- FeatureSwap Feature = 524288
+ FeatureSandboxNone Feature = 16
+ FeatureSandboxSetuid Feature = 32
+ FeatureSandboxNamespace Feature = 64
+ FeatureSandboxAndroid Feature = 128
+ FeatureFault Feature = 256
+ FeatureLeak Feature = 512
+ FeatureNetInjection Feature = 1024
+ FeatureNetDevices Feature = 2048
+ FeatureKCSAN Feature = 4096
+ FeatureDevlinkPCI Feature = 8192
+ FeatureNicVF Feature = 16384
+ FeatureUSBEmulation Feature = 32768
+ FeatureVhciInjection Feature = 65536
+ FeatureWifiEmulation Feature = 131072
+ FeatureLRWPANEmulation Feature = 262144
+ FeatureBinFmtMisc Feature = 524288
+ FeatureSwap Feature = 1048576
)
var EnumNamesFeature = map[Feature]string{
@@ -38,6 +39,7 @@ var EnumNamesFeature = map[Feature]string{
FeatureComparisons: "Comparisons",
FeatureExtraCoverage: "ExtraCoverage",
FeatureDelayKcovMmap: "DelayKcovMmap",
+ FeatureSandboxNone: "SandboxNone",
FeatureSandboxSetuid: "SandboxSetuid",
FeatureSandboxNamespace: "SandboxNamespace",
FeatureSandboxAndroid: "SandboxAndroid",
@@ -61,6 +63,7 @@ var EnumValuesFeature = map[string]Feature{
"Comparisons": FeatureComparisons,
"ExtraCoverage": FeatureExtraCoverage,
"DelayKcovMmap": FeatureDelayKcovMmap,
+ "SandboxNone": FeatureSandboxNone,
"SandboxSetuid": FeatureSandboxSetuid,
"SandboxNamespace": FeatureSandboxNamespace,
"SandboxAndroid": FeatureSandboxAndroid,
@@ -93,6 +96,7 @@ const (
HostMessagesRawExecRequest HostMessagesRaw = 1
HostMessagesRawSignalUpdate HostMessagesRaw = 2
HostMessagesRawStartLeakChecks HostMessagesRaw = 3
+ HostMessagesRawStateRequest HostMessagesRaw = 4
)
var EnumNamesHostMessagesRaw = map[HostMessagesRaw]string{
@@ -100,6 +104,7 @@ var EnumNamesHostMessagesRaw = map[HostMessagesRaw]string{
HostMessagesRawExecRequest: "ExecRequest",
HostMessagesRawSignalUpdate: "SignalUpdate",
HostMessagesRawStartLeakChecks: "StartLeakChecks",
+ HostMessagesRawStateRequest: "StateRequest",
}
var EnumValuesHostMessagesRaw = map[string]HostMessagesRaw{
@@ -107,6 +112,7 @@ var EnumValuesHostMessagesRaw = map[string]HostMessagesRaw{
"ExecRequest": HostMessagesRawExecRequest,
"SignalUpdate": HostMessagesRawSignalUpdate,
"StartLeakChecks": HostMessagesRawStartLeakChecks,
+ "StateRequest": HostMessagesRawStateRequest,
}
func (v HostMessagesRaw) String() string {
@@ -132,6 +138,8 @@ func (t *HostMessagesRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
return t.Value.(*SignalUpdateRawT).Pack(builder)
case HostMessagesRawStartLeakChecks:
return t.Value.(*StartLeakChecksRawT).Pack(builder)
+ case HostMessagesRawStateRequest:
+ return t.Value.(*StateRequestRawT).Pack(builder)
}
return 0
}
@@ -147,6 +155,9 @@ func (rcv HostMessagesRaw) UnPack(table flatbuffers.Table) *HostMessagesRawT {
case HostMessagesRawStartLeakChecks:
x := StartLeakChecksRaw{_tab: table}
return &HostMessagesRawT{Type: HostMessagesRawStartLeakChecks, Value: x.UnPack()}
+ case HostMessagesRawStateRequest:
+ x := StateRequestRaw{_tab: table}
+ return &HostMessagesRawT{Type: HostMessagesRawStateRequest, Value: x.UnPack()}
}
return nil
}
@@ -157,18 +168,21 @@ const (
ExecutorMessagesRawNONE ExecutorMessagesRaw = 0
ExecutorMessagesRawExecResult ExecutorMessagesRaw = 1
ExecutorMessagesRawExecuting ExecutorMessagesRaw = 2
+ ExecutorMessagesRawState ExecutorMessagesRaw = 3
)
var EnumNamesExecutorMessagesRaw = map[ExecutorMessagesRaw]string{
ExecutorMessagesRawNONE: "NONE",
ExecutorMessagesRawExecResult: "ExecResult",
ExecutorMessagesRawExecuting: "Executing",
+ ExecutorMessagesRawState: "State",
}
var EnumValuesExecutorMessagesRaw = map[string]ExecutorMessagesRaw{
"NONE": ExecutorMessagesRawNONE,
"ExecResult": ExecutorMessagesRawExecResult,
"Executing": ExecutorMessagesRawExecuting,
+ "State": ExecutorMessagesRawState,
}
func (v ExecutorMessagesRaw) String() string {
@@ -192,6 +206,8 @@ func (t *ExecutorMessagesRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UO
return t.Value.(*ExecResultRawT).Pack(builder)
case ExecutorMessagesRawExecuting:
return t.Value.(*ExecutingMessageRawT).Pack(builder)
+ case ExecutorMessagesRawState:
+ return t.Value.(*StateResultRawT).Pack(builder)
}
return 0
}
@@ -204,6 +220,9 @@ func (rcv ExecutorMessagesRaw) UnPack(table flatbuffers.Table) *ExecutorMessages
case ExecutorMessagesRawExecuting:
x := ExecutingMessageRaw{_tab: table}
return &ExecutorMessagesRawT{Type: ExecutorMessagesRawExecuting, Value: x.UnPack()}
+ case ExecutorMessagesRawState:
+ x := StateResultRaw{_tab: table}
+ return &ExecutorMessagesRawT{Type: ExecutorMessagesRawState, Value: x.UnPack()}
}
return nil
}
@@ -212,21 +231,18 @@ type RequestFlag uint64
const (
RequestFlagIsBinary RequestFlag = 1
- RequestFlagResetState RequestFlag = 2
- RequestFlagReturnOutput RequestFlag = 4
- RequestFlagReturnError RequestFlag = 8
+ RequestFlagReturnOutput RequestFlag = 2
+ RequestFlagReturnError RequestFlag = 4
)
var EnumNamesRequestFlag = map[RequestFlag]string{
RequestFlagIsBinary: "IsBinary",
- RequestFlagResetState: "ResetState",
RequestFlagReturnOutput: "ReturnOutput",
RequestFlagReturnError: "ReturnError",
}
var EnumValuesRequestFlag = map[string]RequestFlag{
"IsBinary": RequestFlagIsBinary,
- "ResetState": RequestFlagResetState,
"ReturnOutput": RequestFlagReturnOutput,
"ReturnError": RequestFlagReturnError,
}
@@ -243,25 +259,29 @@ type ExecEnv uint64
const (
ExecEnvDebug ExecEnv = 1
ExecEnvSignal ExecEnv = 2
- ExecEnvSandboxSetuid ExecEnv = 4
- ExecEnvSandboxNamespace ExecEnv = 8
- ExecEnvSandboxAndroid ExecEnv = 16
- ExecEnvExtraCover ExecEnv = 32
- ExecEnvEnableTun ExecEnv = 64
- ExecEnvEnableNetDev ExecEnv = 128
- ExecEnvEnableNetReset ExecEnv = 256
- ExecEnvEnableCgroups ExecEnv = 512
- ExecEnvEnableCloseFds ExecEnv = 1024
- ExecEnvEnableDevlinkPCI ExecEnv = 2048
- ExecEnvEnableVhciInjection ExecEnv = 4096
- ExecEnvEnableWifi ExecEnv = 8192
- ExecEnvDelayKcovMmap ExecEnv = 16384
- ExecEnvEnableNicVF ExecEnv = 32768
+ ExecEnvResetState ExecEnv = 4
+ ExecEnvSandboxNone ExecEnv = 8
+ ExecEnvSandboxSetuid ExecEnv = 16
+ ExecEnvSandboxNamespace ExecEnv = 32
+ ExecEnvSandboxAndroid ExecEnv = 64
+ ExecEnvExtraCover ExecEnv = 128
+ ExecEnvEnableTun ExecEnv = 256
+ ExecEnvEnableNetDev ExecEnv = 512
+ ExecEnvEnableNetReset ExecEnv = 1024
+ ExecEnvEnableCgroups ExecEnv = 2048
+ ExecEnvEnableCloseFds ExecEnv = 4096
+ ExecEnvEnableDevlinkPCI ExecEnv = 8192
+ ExecEnvEnableVhciInjection ExecEnv = 16384
+ ExecEnvEnableWifi ExecEnv = 32768
+ ExecEnvDelayKcovMmap ExecEnv = 65536
+ ExecEnvEnableNicVF ExecEnv = 131072
)
var EnumNamesExecEnv = map[ExecEnv]string{
ExecEnvDebug: "Debug",
ExecEnvSignal: "Signal",
+ ExecEnvResetState: "ResetState",
+ ExecEnvSandboxNone: "SandboxNone",
ExecEnvSandboxSetuid: "SandboxSetuid",
ExecEnvSandboxNamespace: "SandboxNamespace",
ExecEnvSandboxAndroid: "SandboxAndroid",
@@ -281,6 +301,8 @@ var EnumNamesExecEnv = map[ExecEnv]string{
var EnumValuesExecEnv = map[string]ExecEnv{
"Debug": ExecEnvDebug,
"Signal": ExecEnvSignal,
+ "ResetState": ExecEnvResetState,
+ "SandboxNone": ExecEnvSandboxNone,
"SandboxSetuid": ExecEnvSandboxSetuid,
"SandboxNamespace": ExecEnvSandboxNamespace,
"SandboxAndroid": ExecEnvSandboxAndroid,
@@ -485,14 +507,17 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
}
type ConnectReplyRawT struct {
- Debug bool `json:"debug"`
- Procs int32 `json:"procs"`
- Slowdown int32 `json:"slowdown"`
- LeakFrames []string `json:"leak_frames"`
- RaceFrames []string `json:"race_frames"`
- Features Feature `json:"features"`
- Files []string `json:"files"`
- Globs []string `json:"globs"`
+ Debug bool `json:"debug"`
+ Cover bool `json:"cover"`
+ Procs int32 `json:"procs"`
+ Slowdown int32 `json:"slowdown"`
+ SyscallTimeoutMs int32 `json:"syscall_timeout_ms"`
+ ProgramTimeoutMs int32 `json:"program_timeout_ms"`
+ LeakFrames []string `json:"leak_frames"`
+ RaceFrames []string `json:"race_frames"`
+ Features Feature `json:"features"`
+ Files []string `json:"files"`
+ Globs []string `json:"globs"`
}
func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -553,8 +578,11 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
}
ConnectReplyRawStart(builder)
ConnectReplyRawAddDebug(builder, t.Debug)
+ ConnectReplyRawAddCover(builder, t.Cover)
ConnectReplyRawAddProcs(builder, t.Procs)
ConnectReplyRawAddSlowdown(builder, t.Slowdown)
+ ConnectReplyRawAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs)
+ ConnectReplyRawAddProgramTimeoutMs(builder, t.ProgramTimeoutMs)
ConnectReplyRawAddLeakFrames(builder, leakFramesOffset)
ConnectReplyRawAddRaceFrames(builder, raceFramesOffset)
ConnectReplyRawAddFeatures(builder, t.Features)
@@ -565,8 +593,11 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) {
t.Debug = rcv.Debug()
+ t.Cover = rcv.Cover()
t.Procs = rcv.Procs()
t.Slowdown = rcv.Slowdown()
+ t.SyscallTimeoutMs = rcv.SyscallTimeoutMs()
+ t.ProgramTimeoutMs = rcv.ProgramTimeoutMs()
leakFramesLength := rcv.LeakFramesLength()
t.LeakFrames = make([]string, leakFramesLength)
for j := 0; j < leakFramesLength; j++ {
@@ -638,20 +669,32 @@ func (rcv *ConnectReplyRaw) MutateDebug(n bool) bool {
return rcv._tab.MutateBoolSlot(4, n)
}
-func (rcv *ConnectReplyRaw) Procs() int32 {
+func (rcv *ConnectReplyRaw) Cover() bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateCover(n bool) bool {
+ return rcv._tab.MutateBoolSlot(6, n)
+}
+
+func (rcv *ConnectReplyRaw) Procs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
return 0
}
func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool {
- return rcv._tab.MutateInt32Slot(6, n)
+ return rcv._tab.MutateInt32Slot(8, n)
}
func (rcv *ConnectReplyRaw) Slowdown() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -659,11 +702,35 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 {
}
func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool {
- return rcv._tab.MutateInt32Slot(8, n)
+ return rcv._tab.MutateInt32Slot(10, n)
+}
+
+func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(12, n)
+}
+
+func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(14, n)
}
func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -672,7 +739,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) LeakFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -680,7 +747,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int {
}
func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -689,7 +756,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) RaceFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -697,7 +764,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int {
}
func (rcv *ConnectReplyRaw) Features() Feature {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos))
}
@@ -705,11 +772,11 @@ func (rcv *ConnectReplyRaw) Features() Feature {
}
func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool {
- return rcv._tab.MutateUint64Slot(14, uint64(n))
+ return rcv._tab.MutateUint64Slot(20, uint64(n))
}
func (rcv *ConnectReplyRaw) Files(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -718,7 +785,7 @@ func (rcv *ConnectReplyRaw) Files(j int) []byte {
}
func (rcv *ConnectReplyRaw) FilesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -726,7 +793,7 @@ func (rcv *ConnectReplyRaw) FilesLength() int {
}
func (rcv *ConnectReplyRaw) Globs(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -735,7 +802,7 @@ func (rcv *ConnectReplyRaw) Globs(j int) []byte {
}
func (rcv *ConnectReplyRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -743,40 +810,49 @@ func (rcv *ConnectReplyRaw) GlobsLength() int {
}
func ConnectReplyRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(8)
+ builder.StartObject(11)
}
func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
builder.PrependBoolSlot(0, debug, false)
}
+func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) {
+ builder.PrependBoolSlot(1, cover, false)
+}
func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) {
- builder.PrependInt32Slot(1, procs, 0)
+ builder.PrependInt32Slot(2, procs, 0)
}
func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) {
- builder.PrependInt32Slot(2, slowdown, 0)
+ builder.PrependInt32Slot(3, slowdown, 0)
+}
+func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) {
+ builder.PrependInt32Slot(4, syscallTimeoutMs, 0)
+}
+func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) {
+ builder.PrependInt32Slot(5, programTimeoutMs, 0)
}
func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(leakFrames), 0)
+ builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(leakFrames), 0)
}
func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(raceFrames), 0)
+ builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(raceFrames), 0)
}
func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) {
- builder.PrependUint64Slot(5, uint64(features), 0)
+ builder.PrependUint64Slot(8, uint64(features), 0)
}
func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(files), 0)
+ builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(files), 0)
}
func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(globs), 0)
+ builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(globs), 0)
}
func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
@@ -1741,14 +1817,11 @@ func CreateExecOptsRaw(builder *flatbuffers.Builder, envFlags ExecEnv, execFlags
}
type ExecRequestRawT struct {
- Id int64 `json:"id"`
- ProgData []byte `json:"prog_data"`
- ExecOpts *ExecOptsRawT `json:"exec_opts"`
- Flags RequestFlag `json:"flags"`
- SignalFilter []uint64 `json:"signal_filter"`
- SignalFilterCall int32 `json:"signal_filter_call"`
- AllSignal []int32 `json:"all_signal"`
- Repeat int32 `json:"repeat"`
+ Id int64 `json:"id"`
+ ProgData []byte `json:"prog_data"`
+ ExecOpts *ExecOptsRawT `json:"exec_opts"`
+ Flags RequestFlag `json:"flags"`
+ AllSignal []int32 `json:"all_signal"`
}
func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -1759,15 +1832,6 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
if t.ProgData != nil {
progDataOffset = builder.CreateByteString(t.ProgData)
}
- signalFilterOffset := flatbuffers.UOffsetT(0)
- if t.SignalFilter != nil {
- signalFilterLength := len(t.SignalFilter)
- ExecRequestRawStartSignalFilterVector(builder, signalFilterLength)
- for j := signalFilterLength - 1; j >= 0; j-- {
- builder.PrependUint64(t.SignalFilter[j])
- }
- signalFilterOffset = builder.EndVector(signalFilterLength)
- }
allSignalOffset := flatbuffers.UOffsetT(0)
if t.AllSignal != nil {
allSignalLength := len(t.AllSignal)
@@ -1783,10 +1847,7 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
execOptsOffset := t.ExecOpts.Pack(builder)
ExecRequestRawAddExecOpts(builder, execOptsOffset)
ExecRequestRawAddFlags(builder, t.Flags)
- ExecRequestRawAddSignalFilter(builder, signalFilterOffset)
- ExecRequestRawAddSignalFilterCall(builder, t.SignalFilterCall)
ExecRequestRawAddAllSignal(builder, allSignalOffset)
- ExecRequestRawAddRepeat(builder, t.Repeat)
return ExecRequestRawEnd(builder)
}
@@ -1795,18 +1856,11 @@ func (rcv *ExecRequestRaw) UnPackTo(t *ExecRequestRawT) {
t.ProgData = rcv.ProgDataBytes()
t.ExecOpts = rcv.ExecOpts(nil).UnPack()
t.Flags = rcv.Flags()
- signalFilterLength := rcv.SignalFilterLength()
- t.SignalFilter = make([]uint64, signalFilterLength)
- for j := 0; j < signalFilterLength; j++ {
- t.SignalFilter[j] = rcv.SignalFilter(j)
- }
- t.SignalFilterCall = rcv.SignalFilterCall()
allSignalLength := rcv.AllSignalLength()
t.AllSignal = make([]int32, allSignalLength)
for j := 0; j < allSignalLength; j++ {
t.AllSignal[j] = rcv.AllSignal(j)
}
- t.Repeat = rcv.Repeat()
}
func (rcv *ExecRequestRaw) UnPack() *ExecRequestRawT {
@@ -1916,46 +1970,8 @@ func (rcv *ExecRequestRaw) MutateFlags(n RequestFlag) bool {
return rcv._tab.MutateUint64Slot(10, uint64(n))
}
-func (rcv *ExecRequestRaw) SignalFilter(j int) uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8))
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) SignalFilterLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- return rcv._tab.VectorLen(o)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateSignalFilter(j int, n uint64) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n)
- }
- return false
-}
-
-func (rcv *ExecRequestRaw) SignalFilterCall() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
- if o != 0 {
- return rcv._tab.GetInt32(o + rcv._tab.Pos)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateSignalFilterCall(n int32) bool {
- return rcv._tab.MutateInt32Slot(14, n)
-}
-
func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4))
@@ -1964,7 +1980,7 @@ func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
}
func (rcv *ExecRequestRaw) AllSignalLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -1972,7 +1988,7 @@ func (rcv *ExecRequestRaw) AllSignalLength() int {
}
func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n)
@@ -1980,20 +1996,8 @@ func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
return false
}
-func (rcv *ExecRequestRaw) Repeat() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
- if o != 0 {
- return rcv._tab.GetInt32(o + rcv._tab.Pos)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateRepeat(n int32) bool {
- return rcv._tab.MutateInt32Slot(18, n)
-}
-
func ExecRequestRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(8)
+ builder.StartObject(5)
}
func ExecRequestRawAddId(builder *flatbuffers.Builder, id int64) {
builder.PrependInt64Slot(0, id, 0)
@@ -2010,24 +2014,12 @@ func ExecRequestRawAddExecOpts(builder *flatbuffers.Builder, execOpts flatbuffer
func ExecRequestRawAddFlags(builder *flatbuffers.Builder, flags RequestFlag) {
builder.PrependUint64Slot(3, uint64(flags), 0)
}
-func ExecRequestRawAddSignalFilter(builder *flatbuffers.Builder, signalFilter flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(signalFilter), 0)
-}
-func ExecRequestRawStartSignalFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(8, numElems, 8)
-}
-func ExecRequestRawAddSignalFilterCall(builder *flatbuffers.Builder, signalFilterCall int32) {
- builder.PrependInt32Slot(5, signalFilterCall, 0)
-}
func ExecRequestRawAddAllSignal(builder *flatbuffers.Builder, allSignal flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(allSignal), 0)
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(allSignal), 0)
}
func ExecRequestRawStartAllSignalVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
-func ExecRequestRawAddRepeat(builder *flatbuffers.Builder, repeat int32) {
- builder.PrependInt32Slot(7, repeat, 0)
-}
func ExecRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
@@ -2242,6 +2234,63 @@ func StartLeakChecksRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
+type StateRequestRawT struct {
+}
+
+func (t *StateRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ StateRequestRawStart(builder)
+ return StateRequestRawEnd(builder)
+}
+
+func (rcv *StateRequestRaw) UnPackTo(t *StateRequestRawT) {
+}
+
+func (rcv *StateRequestRaw) UnPack() *StateRequestRawT {
+ if rcv == nil {
+ return nil
+ }
+ t := &StateRequestRawT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type StateRequestRaw struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsStateRequestRaw(buf []byte, offset flatbuffers.UOffsetT) *StateRequestRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &StateRequestRaw{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsStateRequestRaw(buf []byte, offset flatbuffers.UOffsetT) *StateRequestRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &StateRequestRaw{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *StateRequestRaw) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *StateRequestRaw) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func StateRequestRawStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func StateRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
+
type ExecutingMessageRawT struct {
Id int64 `json:"id"`
ProcId int32 `json:"proc_id"`
@@ -2664,6 +2713,7 @@ func CreateComparisonRaw(builder *flatbuffers.Builder, op1 uint64, op2 uint64) f
type ProgInfoRawT struct {
Calls []*CallInfoRawT `json:"calls"`
+ ExtraRaw []*CallInfoRawT `json:"extra_raw"`
Extra *CallInfoRawT `json:"extra"`
Elapsed uint64 `json:"elapsed"`
Freshness uint64 `json:"freshness"`
@@ -2686,9 +2736,23 @@ func (t *ProgInfoRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
}
callsOffset = builder.EndVector(callsLength)
}
+ extraRawOffset := flatbuffers.UOffsetT(0)
+ if t.ExtraRaw != nil {
+ extraRawLength := len(t.ExtraRaw)
+ extraRawOffsets := make([]flatbuffers.UOffsetT, extraRawLength)
+ for j := 0; j < extraRawLength; j++ {
+ extraRawOffsets[j] = t.ExtraRaw[j].Pack(builder)
+ }
+ ProgInfoRawStartExtraRawVector(builder, extraRawLength)
+ for j := extraRawLength - 1; j >= 0; j-- {
+ builder.PrependUOffsetT(extraRawOffsets[j])
+ }
+ extraRawOffset = builder.EndVector(extraRawLength)
+ }
extraOffset := t.Extra.Pack(builder)
ProgInfoRawStart(builder)
ProgInfoRawAddCalls(builder, callsOffset)
+ ProgInfoRawAddExtraRaw(builder, extraRawOffset)
ProgInfoRawAddExtra(builder, extraOffset)
ProgInfoRawAddElapsed(builder, t.Elapsed)
ProgInfoRawAddFreshness(builder, t.Freshness)
@@ -2703,6 +2767,13 @@ func (rcv *ProgInfoRaw) UnPackTo(t *ProgInfoRawT) {
rcv.Calls(&x, j)
t.Calls[j] = x.UnPack()
}
+ extraRawLength := rcv.ExtraRawLength()
+ t.ExtraRaw = make([]*CallInfoRawT, extraRawLength)
+ for j := 0; j < extraRawLength; j++ {
+ x := CallInfoRaw{}
+ rcv.ExtraRaw(&x, j)
+ t.ExtraRaw[j] = x.UnPack()
+ }
t.Extra = rcv.Extra(nil).UnPack()
t.Elapsed = rcv.Elapsed()
t.Freshness = rcv.Freshness()
@@ -2764,9 +2835,29 @@ func (rcv *ProgInfoRaw) CallsLength() int {
return 0
}
-func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
+func (rcv *ProgInfoRaw) ExtraRaw(obj *CallInfoRaw, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *ProgInfoRaw) ExtraRawLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(CallInfoRaw)
@@ -2778,7 +2869,7 @@ func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
}
func (rcv *ProgInfoRaw) Elapsed() uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.GetUint64(o + rcv._tab.Pos)
}
@@ -2786,11 +2877,11 @@ func (rcv *ProgInfoRaw) Elapsed() uint64 {
}
func (rcv *ProgInfoRaw) MutateElapsed(n uint64) bool {
- return rcv._tab.MutateUint64Slot(8, n)
+ return rcv._tab.MutateUint64Slot(10, n)
}
func (rcv *ProgInfoRaw) Freshness() uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
return rcv._tab.GetUint64(o + rcv._tab.Pos)
}
@@ -2798,11 +2889,11 @@ func (rcv *ProgInfoRaw) Freshness() uint64 {
}
func (rcv *ProgInfoRaw) MutateFreshness(n uint64) bool {
- return rcv._tab.MutateUint64Slot(10, n)
+ return rcv._tab.MutateUint64Slot(12, n)
}
func ProgInfoRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(4)
+ builder.StartObject(5)
}
func ProgInfoRawAddCalls(builder *flatbuffers.Builder, calls flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(calls), 0)
@@ -2810,14 +2901,20 @@ func ProgInfoRawAddCalls(builder *flatbuffers.Builder, calls flatbuffers.UOffset
func ProgInfoRawStartCallsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
+func ProgInfoRawAddExtraRaw(builder *flatbuffers.Builder, extraRaw flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(extraRaw), 0)
+}
+func ProgInfoRawStartExtraRawVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
func ProgInfoRawAddExtra(builder *flatbuffers.Builder, extra flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(extra), 0)
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(extra), 0)
}
func ProgInfoRawAddElapsed(builder *flatbuffers.Builder, elapsed uint64) {
- builder.PrependUint64Slot(2, elapsed, 0)
+ builder.PrependUint64Slot(3, elapsed, 0)
}
func ProgInfoRawAddFreshness(builder *flatbuffers.Builder, freshness uint64) {
- builder.PrependUint64Slot(3, freshness, 0)
+ builder.PrependUint64Slot(4, freshness, 0)
}
func ProgInfoRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
@@ -2979,3 +3076,107 @@ func ExecResultRawAddInfo(builder *flatbuffers.Builder, info flatbuffers.UOffset
func ExecResultRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
+
+type StateResultRawT struct {
+ Data []byte `json:"data"`
+}
+
+func (t *StateResultRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ dataOffset := flatbuffers.UOffsetT(0)
+ if t.Data != nil {
+ dataOffset = builder.CreateByteString(t.Data)
+ }
+ StateResultRawStart(builder)
+ StateResultRawAddData(builder, dataOffset)
+ return StateResultRawEnd(builder)
+}
+
+func (rcv *StateResultRaw) UnPackTo(t *StateResultRawT) {
+ t.Data = rcv.DataBytes()
+}
+
+func (rcv *StateResultRaw) UnPack() *StateResultRawT {
+ if rcv == nil {
+ return nil
+ }
+ t := &StateResultRawT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type StateResultRaw struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsStateResultRaw(buf []byte, offset flatbuffers.UOffsetT) *StateResultRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &StateResultRaw{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsStateResultRaw(buf []byte, offset flatbuffers.UOffsetT) *StateResultRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &StateResultRaw{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *StateResultRaw) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *StateResultRaw) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *StateResultRaw) Data(j int) byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
+ }
+ return 0
+}
+
+func (rcv *StateResultRaw) DataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *StateResultRaw) DataBytes() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func (rcv *StateResultRaw) MutateData(j int, n byte) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
+ }
+ return false
+}
+
+func StateResultRawStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func StateResultRawAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(data), 0)
+}
+func StateResultRawStartDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(1, numElems, 1)
+}
+func StateResultRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index d430e48f2..6f448c410 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -65,6 +65,10 @@ struct StartLeakChecksRaw;
struct StartLeakChecksRawBuilder;
struct StartLeakChecksRawT;
+struct StateRequestRaw;
+struct StateRequestRawBuilder;
+struct StateRequestRawT;
+
struct ExecutingMessageRaw;
struct ExecutingMessageRawBuilder;
struct ExecutingMessageRawT;
@@ -83,38 +87,44 @@ struct ExecResultRaw;
struct ExecResultRawBuilder;
struct ExecResultRawT;
+struct StateResultRaw;
+struct StateResultRawBuilder;
+struct StateResultRawT;
+
enum class Feature : uint64_t {
Coverage = 1ULL,
Comparisons = 2ULL,
ExtraCoverage = 4ULL,
DelayKcovMmap = 8ULL,
- SandboxSetuid = 16ULL,
- SandboxNamespace = 32ULL,
- SandboxAndroid = 64ULL,
- Fault = 128ULL,
- Leak = 256ULL,
- NetInjection = 512ULL,
- NetDevices = 1024ULL,
- KCSAN = 2048ULL,
- DevlinkPCI = 4096ULL,
- NicVF = 8192ULL,
- USBEmulation = 16384ULL,
- VhciInjection = 32768ULL,
- WifiEmulation = 65536ULL,
- LRWPANEmulation = 131072ULL,
- BinFmtMisc = 262144ULL,
- Swap = 524288ULL,
+ SandboxNone = 16ULL,
+ SandboxSetuid = 32ULL,
+ SandboxNamespace = 64ULL,
+ SandboxAndroid = 128ULL,
+ Fault = 256ULL,
+ Leak = 512ULL,
+ NetInjection = 1024ULL,
+ NetDevices = 2048ULL,
+ KCSAN = 4096ULL,
+ DevlinkPCI = 8192ULL,
+ NicVF = 16384ULL,
+ USBEmulation = 32768ULL,
+ VhciInjection = 65536ULL,
+ WifiEmulation = 131072ULL,
+ LRWPANEmulation = 262144ULL,
+ BinFmtMisc = 524288ULL,
+ Swap = 1048576ULL,
NONE = 0,
- ANY = 1048575ULL
+ ANY = 2097151ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(Feature, uint64_t)
-inline const Feature (&EnumValuesFeature())[20] {
+inline const Feature (&EnumValuesFeature())[21] {
static const Feature values[] = {
Feature::Coverage,
Feature::Comparisons,
Feature::ExtraCoverage,
Feature::DelayKcovMmap,
+ Feature::SandboxNone,
Feature::SandboxSetuid,
Feature::SandboxNamespace,
Feature::SandboxAndroid,
@@ -141,6 +151,7 @@ inline const char *EnumNameFeature(Feature e) {
case Feature::Comparisons: return "Comparisons";
case Feature::ExtraCoverage: return "ExtraCoverage";
case Feature::DelayKcovMmap: return "DelayKcovMmap";
+ case Feature::SandboxNone: return "SandboxNone";
case Feature::SandboxSetuid: return "SandboxSetuid";
case Feature::SandboxNamespace: return "SandboxNamespace";
case Feature::SandboxAndroid: return "SandboxAndroid";
@@ -166,33 +177,36 @@ enum class HostMessagesRaw : uint8_t {
ExecRequest = 1,
SignalUpdate = 2,
StartLeakChecks = 3,
+ StateRequest = 4,
MIN = NONE,
- MAX = StartLeakChecks
+ MAX = StateRequest
};
-inline const HostMessagesRaw (&EnumValuesHostMessagesRaw())[4] {
+inline const HostMessagesRaw (&EnumValuesHostMessagesRaw())[5] {
static const HostMessagesRaw values[] = {
HostMessagesRaw::NONE,
HostMessagesRaw::ExecRequest,
HostMessagesRaw::SignalUpdate,
- HostMessagesRaw::StartLeakChecks
+ HostMessagesRaw::StartLeakChecks,
+ HostMessagesRaw::StateRequest
};
return values;
}
inline const char * const *EnumNamesHostMessagesRaw() {
- static const char * const names[5] = {
+ static const char * const names[6] = {
"NONE",
"ExecRequest",
"SignalUpdate",
"StartLeakChecks",
+ "StateRequest",
nullptr
};
return names;
}
inline const char *EnumNameHostMessagesRaw(HostMessagesRaw e) {
- if (flatbuffers::IsOutRange(e, HostMessagesRaw::NONE, HostMessagesRaw::StartLeakChecks)) return "";
+ if (flatbuffers::IsOutRange(e, HostMessagesRaw::NONE, HostMessagesRaw::StateRequest)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesHostMessagesRaw()[index];
}
@@ -213,6 +227,10 @@ template<> struct HostMessagesRawTraits<rpc::StartLeakChecksRaw> {
static const HostMessagesRaw enum_value = HostMessagesRaw::StartLeakChecks;
};
+template<> struct HostMessagesRawTraits<rpc::StateRequestRaw> {
+ static const HostMessagesRaw enum_value = HostMessagesRaw::StateRequest;
+};
+
template<typename T> struct HostMessagesRawUnionTraits {
static const HostMessagesRaw enum_value = HostMessagesRaw::NONE;
};
@@ -229,6 +247,10 @@ template<> struct HostMessagesRawUnionTraits<rpc::StartLeakChecksRawT> {
static const HostMessagesRaw enum_value = HostMessagesRaw::StartLeakChecks;
};
+template<> struct HostMessagesRawUnionTraits<rpc::StateRequestRawT> {
+ static const HostMessagesRaw enum_value = HostMessagesRaw::StateRequest;
+};
+
struct HostMessagesRawUnion {
HostMessagesRaw type;
void *value;
@@ -283,6 +305,14 @@ struct HostMessagesRawUnion {
return type == HostMessagesRaw::StartLeakChecks ?
reinterpret_cast<const rpc::StartLeakChecksRawT *>(value) : nullptr;
}
+ rpc::StateRequestRawT *AsStateRequest() {
+ return type == HostMessagesRaw::StateRequest ?
+ reinterpret_cast<rpc::StateRequestRawT *>(value) : nullptr;
+ }
+ const rpc::StateRequestRawT *AsStateRequest() const {
+ return type == HostMessagesRaw::StateRequest ?
+ reinterpret_cast<const rpc::StateRequestRawT *>(value) : nullptr;
+ }
};
bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, HostMessagesRaw type);
@@ -292,31 +322,34 @@ enum class ExecutorMessagesRaw : uint8_t {
NONE = 0,
ExecResult = 1,
Executing = 2,
+ State = 3,
MIN = NONE,
- MAX = Executing
+ MAX = State
};
-inline const ExecutorMessagesRaw (&EnumValuesExecutorMessagesRaw())[3] {
+inline const ExecutorMessagesRaw (&EnumValuesExecutorMessagesRaw())[4] {
static const ExecutorMessagesRaw values[] = {
ExecutorMessagesRaw::NONE,
ExecutorMessagesRaw::ExecResult,
- ExecutorMessagesRaw::Executing
+ ExecutorMessagesRaw::Executing,
+ ExecutorMessagesRaw::State
};
return values;
}
inline const char * const *EnumNamesExecutorMessagesRaw() {
- static const char * const names[4] = {
+ static const char * const names[5] = {
"NONE",
"ExecResult",
"Executing",
+ "State",
nullptr
};
return names;
}
inline const char *EnumNameExecutorMessagesRaw(ExecutorMessagesRaw e) {
- if (flatbuffers::IsOutRange(e, ExecutorMessagesRaw::NONE, ExecutorMessagesRaw::Executing)) return "";
+ if (flatbuffers::IsOutRange(e, ExecutorMessagesRaw::NONE, ExecutorMessagesRaw::State)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesExecutorMessagesRaw()[index];
}
@@ -333,6 +366,10 @@ template<> struct ExecutorMessagesRawTraits<rpc::ExecutingMessageRaw> {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::Executing;
};
+template<> struct ExecutorMessagesRawTraits<rpc::StateResultRaw> {
+ static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::State;
+};
+
template<typename T> struct ExecutorMessagesRawUnionTraits {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::NONE;
};
@@ -345,6 +382,10 @@ template<> struct ExecutorMessagesRawUnionTraits<rpc::ExecutingMessageRawT> {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::Executing;
};
+template<> struct ExecutorMessagesRawUnionTraits<rpc::StateResultRawT> {
+ static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::State;
+};
+
struct ExecutorMessagesRawUnion {
ExecutorMessagesRaw type;
void *value;
@@ -391,6 +432,14 @@ struct ExecutorMessagesRawUnion {
return type == ExecutorMessagesRaw::Executing ?
reinterpret_cast<const rpc::ExecutingMessageRawT *>(value) : nullptr;
}
+ rpc::StateResultRawT *AsState() {
+ return type == ExecutorMessagesRaw::State ?
+ reinterpret_cast<rpc::StateResultRawT *>(value) : nullptr;
+ }
+ const rpc::StateResultRawT *AsState() const {
+ return type == ExecutorMessagesRaw::State ?
+ reinterpret_cast<const rpc::StateResultRawT *>(value) : nullptr;
+ }
};
bool VerifyExecutorMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, ExecutorMessagesRaw type);
@@ -398,18 +447,16 @@ bool VerifyExecutorMessagesRawVector(flatbuffers::Verifier &verifier, const flat
enum class RequestFlag : uint64_t {
IsBinary = 1ULL,
- ResetState = 2ULL,
- ReturnOutput = 4ULL,
- ReturnError = 8ULL,
+ ReturnOutput = 2ULL,
+ ReturnError = 4ULL,
NONE = 0,
- ANY = 15ULL
+ ANY = 7ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(RequestFlag, uint64_t)
-inline const RequestFlag (&EnumValuesRequestFlag())[4] {
+inline const RequestFlag (&EnumValuesRequestFlag())[3] {
static const RequestFlag values[] = {
RequestFlag::IsBinary,
- RequestFlag::ResetState,
RequestFlag::ReturnOutput,
RequestFlag::ReturnError
};
@@ -417,14 +464,10 @@ inline const RequestFlag (&EnumValuesRequestFlag())[4] {
}
inline const char * const *EnumNamesRequestFlag() {
- static const char * const names[9] = {
+ static const char * const names[5] = {
"IsBinary",
- "ResetState",
- "",
"ReturnOutput",
"",
- "",
- "",
"ReturnError",
nullptr
};
@@ -440,29 +483,33 @@ inline const char *EnumNameRequestFlag(RequestFlag e) {
enum class ExecEnv : uint64_t {
Debug = 1ULL,
Signal = 2ULL,
- SandboxSetuid = 4ULL,
- SandboxNamespace = 8ULL,
- SandboxAndroid = 16ULL,
- ExtraCover = 32ULL,
- EnableTun = 64ULL,
- EnableNetDev = 128ULL,
- EnableNetReset = 256ULL,
- EnableCgroups = 512ULL,
- EnableCloseFds = 1024ULL,
- EnableDevlinkPCI = 2048ULL,
- EnableVhciInjection = 4096ULL,
- EnableWifi = 8192ULL,
- DelayKcovMmap = 16384ULL,
- EnableNicVF = 32768ULL,
+ ResetState = 4ULL,
+ SandboxNone = 8ULL,
+ SandboxSetuid = 16ULL,
+ SandboxNamespace = 32ULL,
+ SandboxAndroid = 64ULL,
+ ExtraCover = 128ULL,
+ EnableTun = 256ULL,
+ EnableNetDev = 512ULL,
+ EnableNetReset = 1024ULL,
+ EnableCgroups = 2048ULL,
+ EnableCloseFds = 4096ULL,
+ EnableDevlinkPCI = 8192ULL,
+ EnableVhciInjection = 16384ULL,
+ EnableWifi = 32768ULL,
+ DelayKcovMmap = 65536ULL,
+ EnableNicVF = 131072ULL,
NONE = 0,
- ANY = 65535ULL
+ ANY = 262143ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(ExecEnv, uint64_t)
-inline const ExecEnv (&EnumValuesExecEnv())[16] {
+inline const ExecEnv (&EnumValuesExecEnv())[18] {
static const ExecEnv values[] = {
ExecEnv::Debug,
ExecEnv::Signal,
+ ExecEnv::ResetState,
+ ExecEnv::SandboxNone,
ExecEnv::SandboxSetuid,
ExecEnv::SandboxNamespace,
ExecEnv::SandboxAndroid,
@@ -485,6 +532,8 @@ inline const char *EnumNameExecEnv(ExecEnv e) {
switch (e) {
case ExecEnv::Debug: return "Debug";
case ExecEnv::Signal: return "Signal";
+ case ExecEnv::ResetState: return "ResetState";
+ case ExecEnv::SandboxNone: return "SandboxNone";
case ExecEnv::SandboxSetuid: return "SandboxSetuid";
case ExecEnv::SandboxNamespace: return "SandboxNamespace";
case ExecEnv::SandboxAndroid: return "SandboxAndroid";
@@ -757,8 +806,11 @@ flatbuffers::Offset<ConnectRequestRaw> CreateConnectRequestRaw(flatbuffers::Flat
struct ConnectReplyRawT : public flatbuffers::NativeTable {
typedef ConnectReplyRaw TableType;
bool debug = false;
+ bool cover = false;
int32_t procs = 0;
int32_t slowdown = 0;
+ int32_t syscall_timeout_ms = 0;
+ int32_t program_timeout_ms = 0;
std::vector<std::string> leak_frames{};
std::vector<std::string> race_frames{};
rpc::Feature features = static_cast<rpc::Feature>(0);
@@ -771,23 +823,35 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ConnectReplyRawBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_DEBUG = 4,
- VT_PROCS = 6,
- VT_SLOWDOWN = 8,
- VT_LEAK_FRAMES = 10,
- VT_RACE_FRAMES = 12,
- VT_FEATURES = 14,
- VT_FILES = 16,
- VT_GLOBS = 18
+ VT_COVER = 6,
+ VT_PROCS = 8,
+ VT_SLOWDOWN = 10,
+ VT_SYSCALL_TIMEOUT_MS = 12,
+ VT_PROGRAM_TIMEOUT_MS = 14,
+ VT_LEAK_FRAMES = 16,
+ VT_RACE_FRAMES = 18,
+ VT_FEATURES = 20,
+ VT_FILES = 22,
+ VT_GLOBS = 24
};
bool debug() const {
return GetField<uint8_t>(VT_DEBUG, 0) != 0;
}
+ bool cover() const {
+ return GetField<uint8_t>(VT_COVER, 0) != 0;
+ }
int32_t procs() const {
return GetField<int32_t>(VT_PROCS, 0);
}
int32_t slowdown() const {
return GetField<int32_t>(VT_SLOWDOWN, 0);
}
+ int32_t syscall_timeout_ms() const {
+ return GetField<int32_t>(VT_SYSCALL_TIMEOUT_MS, 0);
+ }
+ int32_t program_timeout_ms() const {
+ return GetField<int32_t>(VT_PROGRAM_TIMEOUT_MS, 0);
+ }
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *leak_frames() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_LEAK_FRAMES);
}
@@ -806,8 +870,11 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_DEBUG, 1) &&
+ VerifyField<uint8_t>(verifier, VT_COVER, 1) &&
VerifyField<int32_t>(verifier, VT_PROCS, 4) &&
VerifyField<int32_t>(verifier, VT_SLOWDOWN, 4) &&
+ VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) &&
+ VerifyField<int32_t>(verifier, VT_PROGRAM_TIMEOUT_MS, 4) &&
VerifyOffset(verifier, VT_LEAK_FRAMES) &&
verifier.VerifyVector(leak_frames()) &&
verifier.VerifyVectorOfStrings(leak_frames()) &&
@@ -835,12 +902,21 @@ struct ConnectReplyRawBuilder {
void add_debug(bool debug) {
fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_DEBUG, static_cast<uint8_t>(debug), 0);
}
+ void add_cover(bool cover) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER, static_cast<uint8_t>(cover), 0);
+ }
void add_procs(int32_t procs) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROCS, procs, 0);
}
void add_slowdown(int32_t slowdown) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_SLOWDOWN, slowdown, 0);
}
+ void add_syscall_timeout_ms(int32_t syscall_timeout_ms) {
+ fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_SYSCALL_TIMEOUT_MS, syscall_timeout_ms, 0);
+ }
+ void add_program_timeout_ms(int32_t program_timeout_ms) {
+ fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROGRAM_TIMEOUT_MS, program_timeout_ms, 0);
+ }
void add_leak_frames(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> leak_frames) {
fbb_.AddOffset(ConnectReplyRaw::VT_LEAK_FRAMES, leak_frames);
}
@@ -870,8 +946,11 @@ struct ConnectReplyRawBuilder {
inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
+ bool cover = false,
int32_t procs = 0,
int32_t slowdown = 0,
+ int32_t syscall_timeout_ms = 0,
+ int32_t program_timeout_ms = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> leak_frames = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> race_frames = 0,
rpc::Feature features = static_cast<rpc::Feature>(0),
@@ -883,8 +962,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
builder_.add_files(files);
builder_.add_race_frames(race_frames);
builder_.add_leak_frames(leak_frames);
+ builder_.add_program_timeout_ms(program_timeout_ms);
+ builder_.add_syscall_timeout_ms(syscall_timeout_ms);
builder_.add_slowdown(slowdown);
builder_.add_procs(procs);
+ builder_.add_cover(cover);
builder_.add_debug(debug);
return builder_.Finish();
}
@@ -892,8 +974,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
+ bool cover = false,
int32_t procs = 0,
int32_t slowdown = 0,
+ int32_t syscall_timeout_ms = 0,
+ int32_t program_timeout_ms = 0,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *leak_frames = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *race_frames = nullptr,
rpc::Feature features = static_cast<rpc::Feature>(0),
@@ -906,8 +991,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
return rpc::CreateConnectReplyRaw(
_fbb,
debug,
+ cover,
procs,
slowdown,
+ syscall_timeout_ms,
+ program_timeout_ms,
leak_frames__,
race_frames__,
features,
@@ -1392,6 +1480,9 @@ struct HostMessageRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const rpc::StartLeakChecksRaw *msg_as_StartLeakChecks() const {
return msg_type() == rpc::HostMessagesRaw::StartLeakChecks ? static_cast<const rpc::StartLeakChecksRaw *>(msg()) : nullptr;
}
+ const rpc::StateRequestRaw *msg_as_StateRequest() const {
+ return msg_type() == rpc::HostMessagesRaw::StateRequest ? static_cast<const rpc::StateRequestRaw *>(msg()) : nullptr;
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_MSG_TYPE, 1) &&
@@ -1416,6 +1507,10 @@ template<> inline const rpc::StartLeakChecksRaw *HostMessageRaw::msg_as<rpc::Sta
return msg_as_StartLeakChecks();
}
+template<> inline const rpc::StateRequestRaw *HostMessageRaw::msg_as<rpc::StateRequestRaw>() const {
+ return msg_as_StateRequest();
+}
+
struct HostMessageRawBuilder {
typedef HostMessageRaw Table;
flatbuffers::FlatBufferBuilder &fbb_;
@@ -1474,6 +1569,9 @@ struct ExecutorMessageRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const rpc::ExecutingMessageRaw *msg_as_Executing() const {
return msg_type() == rpc::ExecutorMessagesRaw::Executing ? static_cast<const rpc::ExecutingMessageRaw *>(msg()) : nullptr;
}
+ const rpc::StateResultRaw *msg_as_State() const {
+ return msg_type() == rpc::ExecutorMessagesRaw::State ? static_cast<const rpc::StateResultRaw *>(msg()) : nullptr;
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_MSG_TYPE, 1) &&
@@ -1494,6 +1592,10 @@ template<> inline const rpc::ExecutingMessageRaw *ExecutorMessageRaw::msg_as<rpc
return msg_as_Executing();
}
+template<> inline const rpc::StateResultRaw *ExecutorMessageRaw::msg_as<rpc::StateResultRaw>() const {
+ return msg_as_State();
+}
+
struct ExecutorMessageRawBuilder {
typedef ExecutorMessageRaw Table;
flatbuffers::FlatBufferBuilder &fbb_;
@@ -1533,10 +1635,7 @@ struct ExecRequestRawT : public flatbuffers::NativeTable {
std::vector<uint8_t> prog_data{};
std::unique_ptr<rpc::ExecOptsRaw> exec_opts{};
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0);
- std::vector<uint64_t> signal_filter{};
- int32_t signal_filter_call = 0;
std::vector<int32_t> all_signal{};
- int32_t repeat = 0;
ExecRequestRawT() = default;
ExecRequestRawT(const ExecRequestRawT &o);
ExecRequestRawT(ExecRequestRawT&&) FLATBUFFERS_NOEXCEPT = default;
@@ -1551,10 +1650,7 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_PROG_DATA = 6,
VT_EXEC_OPTS = 8,
VT_FLAGS = 10,
- VT_SIGNAL_FILTER = 12,
- VT_SIGNAL_FILTER_CALL = 14,
- VT_ALL_SIGNAL = 16,
- VT_REPEAT = 18
+ VT_ALL_SIGNAL = 12
};
int64_t id() const {
return GetField<int64_t>(VT_ID, 0);
@@ -1568,18 +1664,9 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
rpc::RequestFlag flags() const {
return static_cast<rpc::RequestFlag>(GetField<uint64_t>(VT_FLAGS, 0));
}
- const flatbuffers::Vector<uint64_t> *signal_filter() const {
- return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_SIGNAL_FILTER);
- }
- int32_t signal_filter_call() const {
- return GetField<int32_t>(VT_SIGNAL_FILTER_CALL, 0);
- }
const flatbuffers::Vector<int32_t> *all_signal() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_ALL_SIGNAL);
}
- int32_t repeat() const {
- return GetField<int32_t>(VT_REPEAT, 0);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int64_t>(verifier, VT_ID, 8) &&
@@ -1587,12 +1674,8 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(prog_data()) &&
VerifyField<rpc::ExecOptsRaw>(verifier, VT_EXEC_OPTS, 8) &&
VerifyField<uint64_t>(verifier, VT_FLAGS, 8) &&
- VerifyOffset(verifier, VT_SIGNAL_FILTER) &&
- verifier.VerifyVector(signal_filter()) &&
- VerifyField<int32_t>(verifier, VT_SIGNAL_FILTER_CALL, 4) &&
VerifyOffset(verifier, VT_ALL_SIGNAL) &&
verifier.VerifyVector(all_signal()) &&
- VerifyField<int32_t>(verifier, VT_REPEAT, 4) &&
verifier.EndTable();
}
ExecRequestRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -1616,18 +1699,9 @@ struct ExecRequestRawBuilder {
void add_flags(rpc::RequestFlag flags) {
fbb_.AddElement<uint64_t>(ExecRequestRaw::VT_FLAGS, static_cast<uint64_t>(flags), 0);
}
- void add_signal_filter(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter) {
- fbb_.AddOffset(ExecRequestRaw::VT_SIGNAL_FILTER, signal_filter);
- }
- void add_signal_filter_call(int32_t signal_filter_call) {
- fbb_.AddElement<int32_t>(ExecRequestRaw::VT_SIGNAL_FILTER_CALL, signal_filter_call, 0);
- }
void add_all_signal(flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal) {
fbb_.AddOffset(ExecRequestRaw::VT_ALL_SIGNAL, all_signal);
}
- void add_repeat(int32_t repeat) {
- fbb_.AddElement<int32_t>(ExecRequestRaw::VT_REPEAT, repeat, 0);
- }
explicit ExecRequestRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1645,17 +1719,11 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data = 0,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
- flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter = 0,
- int32_t signal_filter_call = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal = 0,
- int32_t repeat = 0) {
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal = 0) {
ExecRequestRawBuilder builder_(_fbb);
builder_.add_flags(flags);
builder_.add_id(id);
- builder_.add_repeat(repeat);
builder_.add_all_signal(all_signal);
- builder_.add_signal_filter_call(signal_filter_call);
- builder_.add_signal_filter(signal_filter);
builder_.add_exec_opts(exec_opts);
builder_.add_prog_data(prog_data);
return builder_.Finish();
@@ -1667,12 +1735,8 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect(
const std::vector<uint8_t> *prog_data = nullptr,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
- const std::vector<uint64_t> *signal_filter = nullptr,
- int32_t signal_filter_call = 0,
- const std::vector<int32_t> *all_signal = nullptr,
- int32_t repeat = 0) {
+ const std::vector<int32_t> *all_signal = nullptr) {
auto prog_data__ = prog_data ? _fbb.CreateVector<uint8_t>(*prog_data) : 0;
- auto signal_filter__ = signal_filter ? _fbb.CreateVector<uint64_t>(*signal_filter) : 0;
auto all_signal__ = all_signal ? _fbb.CreateVector<int32_t>(*all_signal) : 0;
return rpc::CreateExecRequestRaw(
_fbb,
@@ -1680,10 +1744,7 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect(
prog_data__,
exec_opts,
flags,
- signal_filter__,
- signal_filter_call,
- all_signal__,
- repeat);
+ all_signal__);
}
flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const ExecRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -1804,6 +1865,45 @@ inline flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(
flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(flatbuffers::FlatBufferBuilder &_fbb, const StartLeakChecksRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct StateRequestRawT : public flatbuffers::NativeTable {
+ typedef StateRequestRaw TableType;
+};
+
+struct StateRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef StateRequestRawT NativeTableType;
+ typedef StateRequestRawBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ verifier.EndTable();
+ }
+ StateRequestRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(StateRequestRawT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<StateRequestRaw> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StateRequestRawBuilder {
+ typedef StateRequestRaw Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit StateRequestRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<StateRequestRaw> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<StateRequestRaw>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(
+ flatbuffers::FlatBufferBuilder &_fbb) {
+ StateRequestRawBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct ExecutingMessageRawT : public flatbuffers::NativeTable {
typedef ExecutingMessageRaw TableType;
int64_t id = 0;
@@ -2010,6 +2110,7 @@ flatbuffers::Offset<CallInfoRaw> CreateCallInfoRaw(flatbuffers::FlatBufferBuilde
struct ProgInfoRawT : public flatbuffers::NativeTable {
typedef ProgInfoRaw TableType;
std::vector<std::unique_ptr<rpc::CallInfoRawT>> calls{};
+ std::vector<std::unique_ptr<rpc::CallInfoRawT>> extra_raw{};
std::unique_ptr<rpc::CallInfoRawT> extra{};
uint64_t elapsed = 0;
uint64_t freshness = 0;
@@ -2024,13 +2125,17 @@ struct ProgInfoRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ProgInfoRawBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_CALLS = 4,
- VT_EXTRA = 6,
- VT_ELAPSED = 8,
- VT_FRESHNESS = 10
+ VT_EXTRA_RAW = 6,
+ VT_EXTRA = 8,
+ VT_ELAPSED = 10,
+ VT_FRESHNESS = 12
};
const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *calls() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *>(VT_CALLS);
}
+ const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *extra_raw() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *>(VT_EXTRA_RAW);
+ }
const rpc::CallInfoRaw *extra() const {
return GetPointer<const rpc::CallInfoRaw *>(VT_EXTRA);
}
@@ -2045,6 +2150,9 @@ struct ProgInfoRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_CALLS) &&
verifier.VerifyVector(calls()) &&
verifier.VerifyVectorOfTables(calls()) &&
+ VerifyOffset(verifier, VT_EXTRA_RAW) &&
+ verifier.VerifyVector(extra_raw()) &&
+ verifier.VerifyVectorOfTables(extra_raw()) &&
VerifyOffset(verifier, VT_EXTRA) &&
verifier.VerifyTable(extra()) &&
VerifyField<uint64_t>(verifier, VT_ELAPSED, 8) &&
@@ -2063,6 +2171,9 @@ struct ProgInfoRawBuilder {
void add_calls(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> calls) {
fbb_.AddOffset(ProgInfoRaw::VT_CALLS, calls);
}
+ void add_extra_raw(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> extra_raw) {
+ fbb_.AddOffset(ProgInfoRaw::VT_EXTRA_RAW, extra_raw);
+ }
void add_extra(flatbuffers::Offset<rpc::CallInfoRaw> extra) {
fbb_.AddOffset(ProgInfoRaw::VT_EXTRA, extra);
}
@@ -2086,6 +2197,7 @@ struct ProgInfoRawBuilder {
inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> calls = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> extra_raw = 0,
flatbuffers::Offset<rpc::CallInfoRaw> extra = 0,
uint64_t elapsed = 0,
uint64_t freshness = 0) {
@@ -2093,6 +2205,7 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
builder_.add_freshness(freshness);
builder_.add_elapsed(elapsed);
builder_.add_extra(extra);
+ builder_.add_extra_raw(extra_raw);
builder_.add_calls(calls);
return builder_.Finish();
}
@@ -2100,13 +2213,16 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> *calls = nullptr,
+ const std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> *extra_raw = nullptr,
flatbuffers::Offset<rpc::CallInfoRaw> extra = 0,
uint64_t elapsed = 0,
uint64_t freshness = 0) {
auto calls__ = calls ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>>(*calls) : 0;
+ auto extra_raw__ = extra_raw ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>>(*extra_raw) : 0;
return rpc::CreateProgInfoRaw(
_fbb,
calls__,
+ extra_raw__,
extra,
elapsed,
freshness);
@@ -2222,6 +2338,68 @@ inline flatbuffers::Offset<ExecResultRaw> CreateExecResultRawDirect(
flatbuffers::Offset<ExecResultRaw> CreateExecResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const ExecResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct StateResultRawT : public flatbuffers::NativeTable {
+ typedef StateResultRaw TableType;
+ std::vector<uint8_t> data{};
+};
+
+struct StateResultRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef StateResultRawT NativeTableType;
+ typedef StateResultRawBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_DATA = 4
+ };
+ const flatbuffers::Vector<uint8_t> *data() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_DATA) &&
+ verifier.VerifyVector(data()) &&
+ verifier.EndTable();
+ }
+ StateResultRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(StateResultRawT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<StateResultRaw> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StateResultRawBuilder {
+ typedef StateResultRaw Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+ fbb_.AddOffset(StateResultRaw::VT_DATA, data);
+ }
+ explicit StateResultRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<StateResultRaw> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<StateResultRaw>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0) {
+ StateResultRawBuilder builder_(_fbb);
+ builder_.add_data(data);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRawDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *data = nullptr) {
+ auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
+ return rpc::CreateStateResultRaw(
+ _fbb,
+ data__);
+}
+
+flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
inline ConnectRequestRawT *ConnectRequestRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = std::unique_ptr<ConnectRequestRawT>(new ConnectRequestRawT());
UnPackTo(_o.get(), _resolver);
@@ -2267,8 +2445,11 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r
(void)_o;
(void)_resolver;
{ auto _e = debug(); _o->debug = _e; }
+ { auto _e = cover(); _o->cover = _e; }
{ auto _e = procs(); _o->procs = _e; }
{ auto _e = slowdown(); _o->slowdown = _e; }
+ { auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; }
+ { auto _e = program_timeout_ms(); _o->program_timeout_ms = _e; }
{ auto _e = leak_frames(); if (_e) { _o->leak_frames.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->leak_frames[_i] = _e->Get(_i)->str(); } } }
{ auto _e = race_frames(); if (_e) { _o->race_frames.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->race_frames[_i] = _e->Get(_i)->str(); } } }
{ auto _e = features(); _o->features = _e; }
@@ -2285,8 +2466,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _debug = _o->debug;
+ auto _cover = _o->cover;
auto _procs = _o->procs;
auto _slowdown = _o->slowdown;
+ auto _syscall_timeout_ms = _o->syscall_timeout_ms;
+ auto _program_timeout_ms = _o->program_timeout_ms;
auto _leak_frames = _o->leak_frames.size() ? _fbb.CreateVectorOfStrings(_o->leak_frames) : 0;
auto _race_frames = _o->race_frames.size() ? _fbb.CreateVectorOfStrings(_o->race_frames) : 0;
auto _features = _o->features;
@@ -2295,8 +2479,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
return rpc::CreateConnectReplyRaw(
_fbb,
_debug,
+ _cover,
_procs,
_slowdown,
+ _syscall_timeout_ms,
+ _program_timeout_ms,
_leak_frames,
_race_frames,
_features,
@@ -2542,10 +2729,7 @@ inline ExecRequestRawT::ExecRequestRawT(const ExecRequestRawT &o)
prog_data(o.prog_data),
exec_opts((o.exec_opts) ? new rpc::ExecOptsRaw(*o.exec_opts) : nullptr),
flags(o.flags),
- signal_filter(o.signal_filter),
- signal_filter_call(o.signal_filter_call),
- all_signal(o.all_signal),
- repeat(o.repeat) {
+ all_signal(o.all_signal) {
}
inline ExecRequestRawT &ExecRequestRawT::operator=(ExecRequestRawT o) FLATBUFFERS_NOEXCEPT {
@@ -2553,10 +2737,7 @@ inline ExecRequestRawT &ExecRequestRawT::operator=(ExecRequestRawT o) FLATBUFFER
std::swap(prog_data, o.prog_data);
std::swap(exec_opts, o.exec_opts);
std::swap(flags, o.flags);
- std::swap(signal_filter, o.signal_filter);
- std::swap(signal_filter_call, o.signal_filter_call);
std::swap(all_signal, o.all_signal);
- std::swap(repeat, o.repeat);
return *this;
}
@@ -2573,10 +2754,7 @@ inline void ExecRequestRaw::UnPackTo(ExecRequestRawT *_o, const flatbuffers::res
{ auto _e = prog_data(); if (_e) { _o->prog_data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->prog_data.begin()); } }
{ auto _e = exec_opts(); if (_e) _o->exec_opts = std::unique_ptr<rpc::ExecOptsRaw>(new rpc::ExecOptsRaw(*_e)); }
{ auto _e = flags(); _o->flags = _e; }
- { auto _e = signal_filter(); if (_e) { _o->signal_filter.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->signal_filter[_i] = _e->Get(_i); } } }
- { auto _e = signal_filter_call(); _o->signal_filter_call = _e; }
{ auto _e = all_signal(); if (_e) { _o->all_signal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->all_signal[_i] = _e->Get(_i); } } }
- { auto _e = repeat(); _o->repeat = _e; }
}
inline flatbuffers::Offset<ExecRequestRaw> ExecRequestRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExecRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -2591,20 +2769,14 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::Fla
auto _prog_data = _o->prog_data.size() ? _fbb.CreateVector(_o->prog_data) : 0;
auto _exec_opts = _o->exec_opts ? _o->exec_opts.get() : nullptr;
auto _flags = _o->flags;
- auto _signal_filter = _o->signal_filter.size() ? _fbb.CreateVector(_o->signal_filter) : 0;
- auto _signal_filter_call = _o->signal_filter_call;
auto _all_signal = _o->all_signal.size() ? _fbb.CreateVector(_o->all_signal) : 0;
- auto _repeat = _o->repeat;
return rpc::CreateExecRequestRaw(
_fbb,
_id,
_prog_data,
_exec_opts,
_flags,
- _signal_filter,
- _signal_filter_call,
- _all_signal,
- _repeat);
+ _all_signal);
}
inline SignalUpdateRawT *SignalUpdateRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
@@ -2659,6 +2831,29 @@ inline flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(flatbuff
_fbb);
}
+inline StateRequestRawT *StateRequestRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<StateRequestRawT>(new StateRequestRawT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void StateRequestRaw::UnPackTo(StateRequestRawT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<StateRequestRaw> StateRequestRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateStateRequestRaw(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StateRequestRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ return rpc::CreateStateRequestRaw(
+ _fbb);
+}
+
inline ExecutingMessageRawT *ExecutingMessageRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = std::unique_ptr<ExecutingMessageRawT>(new ExecutingMessageRawT());
UnPackTo(_o.get(), _resolver);
@@ -2738,10 +2933,13 @@ inline ProgInfoRawT::ProgInfoRawT(const ProgInfoRawT &o)
freshness(o.freshness) {
calls.reserve(o.calls.size());
for (const auto &calls_ : o.calls) { calls.emplace_back((calls_) ? new rpc::CallInfoRawT(*calls_) : nullptr); }
+ extra_raw.reserve(o.extra_raw.size());
+ for (const auto &extra_raw_ : o.extra_raw) { extra_raw.emplace_back((extra_raw_) ? new rpc::CallInfoRawT(*extra_raw_) : nullptr); }
}
inline ProgInfoRawT &ProgInfoRawT::operator=(ProgInfoRawT o) FLATBUFFERS_NOEXCEPT {
std::swap(calls, o.calls);
+ std::swap(extra_raw, o.extra_raw);
std::swap(extra, o.extra);
std::swap(elapsed, o.elapsed);
std::swap(freshness, o.freshness);
@@ -2758,6 +2956,7 @@ inline void ProgInfoRaw::UnPackTo(ProgInfoRawT *_o, const flatbuffers::resolver_
(void)_o;
(void)_resolver;
{ auto _e = calls(); if (_e) { _o->calls.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->calls[_i] = std::unique_ptr<rpc::CallInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
+ { auto _e = extra_raw(); if (_e) { _o->extra_raw.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->extra_raw[_i] = std::unique_ptr<rpc::CallInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = extra(); if (_e) _o->extra = std::unique_ptr<rpc::CallInfoRawT>(_e->UnPack(_resolver)); }
{ auto _e = elapsed(); _o->elapsed = _e; }
{ auto _e = freshness(); _o->freshness = _e; }
@@ -2772,12 +2971,14 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(flatbuffers::FlatBuffe
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ProgInfoRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _calls = _o->calls.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>> (_o->calls.size(), [](size_t i, _VectorArgs *__va) { return CreateCallInfoRaw(*__va->__fbb, __va->__o->calls[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _extra_raw = _o->extra_raw.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>> (_o->extra_raw.size(), [](size_t i, _VectorArgs *__va) { return CreateCallInfoRaw(*__va->__fbb, __va->__o->extra_raw[i].get(), __va->__rehasher); }, &_va ) : 0;
auto _extra = _o->extra ? CreateCallInfoRaw(_fbb, _o->extra.get(), _rehasher) : 0;
auto _elapsed = _o->elapsed;
auto _freshness = _o->freshness;
return rpc::CreateProgInfoRaw(
_fbb,
_calls,
+ _extra_raw,
_extra,
_elapsed,
_freshness);
@@ -2833,6 +3034,32 @@ inline flatbuffers::Offset<ExecResultRaw> CreateExecResultRaw(flatbuffers::FlatB
_info);
}
+inline StateResultRawT *StateResultRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<StateResultRawT>(new StateResultRawT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void StateResultRaw::UnPackTo(StateResultRawT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
+}
+
+inline flatbuffers::Offset<StateResultRaw> StateResultRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateStateResultRaw(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StateResultRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
+ return rpc::CreateStateResultRaw(
+ _fbb,
+ _data);
+}
+
inline bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, HostMessagesRaw type) {
switch (type) {
case HostMessagesRaw::NONE: {
@@ -2850,6 +3077,10 @@ inline bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *o
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRaw *>(obj);
return verifier.VerifyTable(ptr);
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRaw *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return true;
}
}
@@ -2881,6 +3112,10 @@ inline void *HostMessagesRawUnion::UnPack(const void *obj, HostMessagesRaw type,
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRaw *>(obj);
return ptr->UnPack(resolver);
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRaw *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -2900,6 +3135,10 @@ inline flatbuffers::Offset<void> HostMessagesRawUnion::Pack(flatbuffers::FlatBuf
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRawT *>(value);
return CreateStartLeakChecksRaw(_fbb, ptr, _rehasher).Union();
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRawT *>(value);
+ return CreateStateRequestRaw(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -2918,6 +3157,10 @@ inline HostMessagesRawUnion::HostMessagesRawUnion(const HostMessagesRawUnion &u)
value = new rpc::StartLeakChecksRawT(*reinterpret_cast<rpc::StartLeakChecksRawT *>(u.value));
break;
}
+ case HostMessagesRaw::StateRequest: {
+ value = new rpc::StateRequestRawT(*reinterpret_cast<rpc::StateRequestRawT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -2940,6 +3183,11 @@ inline void HostMessagesRawUnion::Reset() {
delete ptr;
break;
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<rpc::StateRequestRawT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
@@ -2959,6 +3207,10 @@ inline bool VerifyExecutorMessagesRaw(flatbuffers::Verifier &verifier, const voi
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRaw *>(obj);
return verifier.VerifyTable(ptr);
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRaw *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return true;
}
}
@@ -2986,6 +3238,10 @@ inline void *ExecutorMessagesRawUnion::UnPack(const void *obj, ExecutorMessagesR
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRaw *>(obj);
return ptr->UnPack(resolver);
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRaw *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -3001,6 +3257,10 @@ inline flatbuffers::Offset<void> ExecutorMessagesRawUnion::Pack(flatbuffers::Fla
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRawT *>(value);
return CreateExecutingMessageRaw(_fbb, ptr, _rehasher).Union();
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRawT *>(value);
+ return CreateStateResultRaw(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -3015,6 +3275,10 @@ inline ExecutorMessagesRawUnion::ExecutorMessagesRawUnion(const ExecutorMessages
value = new rpc::ExecutingMessageRawT(*reinterpret_cast<rpc::ExecutingMessageRawT *>(u.value));
break;
}
+ case ExecutorMessagesRaw::State: {
+ value = new rpc::StateResultRawT(*reinterpret_cast<rpc::StateResultRawT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -3032,6 +3296,11 @@ inline void ExecutorMessagesRawUnion::Reset() {
delete ptr;
break;
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<rpc::StateResultRawT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
diff --git a/pkg/flatrpc/helpers.go b/pkg/flatrpc/helpers.go
index 22dc893fe..b85951615 100644
--- a/pkg/flatrpc/helpers.go
+++ b/pkg/flatrpc/helpers.go
@@ -4,6 +4,7 @@
package flatrpc
import (
+ "fmt"
"slices"
"syscall"
)
@@ -25,6 +26,7 @@ type HostMessage = HostMessageRawT
type ExecutorMessages = ExecutorMessagesRawT
type ExecutorMessage = ExecutorMessageRawT
type ExecRequest = ExecRequestRawT
+type StateRequest = StateRequestRawT
type SignalUpdate = SignalUpdateRawT
type StartLeakChecks = StartLeakChecksRawT
type ExecutingMessage = ExecutingMessageRawT
@@ -33,6 +35,7 @@ type Comparison = ComparisonRawT
type ExecOpts = ExecOptsRawT
type ProgInfo = ProgInfoRawT
type ExecResult = ExecResultRawT
+type StateResult = StateResultRawT
func (pi *ProgInfo) Clone() *ProgInfo {
if pi == nil {
@@ -71,9 +74,30 @@ func EmptyProgInfo(calls int) *ProgInfo {
return info
}
-func (eo ExecOpts) MergeFlags(diff ExecOpts) ExecOpts {
- ret := eo
- ret.ExecFlags |= diff.ExecFlags
- ret.EnvFlags |= diff.EnvFlags
- return ret
+func SandboxToFlags(sandbox string) (ExecEnv, error) {
+ switch sandbox {
+ case "none":
+ return ExecEnvSandboxNone, nil
+ case "setuid":
+ return ExecEnvSandboxSetuid, nil
+ case "namespace":
+ return ExecEnvSandboxNamespace, nil
+ case "android":
+ return ExecEnvSandboxAndroid, nil
+ default:
+ return 0, fmt.Errorf("sandbox must contain one of none/setuid/namespace/android")
+ }
+}
+
+func FlagsToSandbox(flags ExecEnv) string {
+ if flags&ExecEnvSandboxNone != 0 {
+ return "none"
+ } else if flags&ExecEnvSandboxSetuid != 0 {
+ return "setuid"
+ } else if flags&ExecEnvSandboxNamespace != 0 {
+ return "namespace"
+ } else if flags&ExecEnvSandboxAndroid != 0 {
+ return "android"
+ }
+ panic("no sandbox flags present")
}
diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go
index 92d8b0f8d..5b95f4eec 100644
--- a/pkg/fuzzer/fuzzer.go
+++ b/pkg/fuzzer/fuzzer.go
@@ -166,7 +166,6 @@ func (fuzzer *Fuzzer) processResult(req *queue.Request, res *queue.Result, flags
type Config struct {
Debug bool
Corpus *corpus.Corpus
- BaseOpts flatrpc.ExecOpts // Fuzzer will use BaseOpts as a base for all requests.
Logf func(level int, msg string, args ...interface{})
Coverage bool
FaultInjection bool
@@ -251,7 +250,6 @@ func (fuzzer *Fuzzer) Next() *queue.Request {
// The fuzzer is not supposed to issue nil requests.
panic("nil request from the fuzzer")
}
- req.ExecOpts = fuzzer.Config.BaseOpts.MergeFlags(req.ExecOpts)
return req
}
diff --git a/pkg/fuzzer/fuzzer_test.go b/pkg/fuzzer/fuzzer_test.go
index 206469fda..55ec09666 100644
--- a/pkg/fuzzer/fuzzer_test.go
+++ b/pkg/fuzzer/fuzzer_test.go
@@ -13,6 +13,7 @@ import (
"runtime"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -20,14 +21,13 @@ import (
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
"github.com/stretchr/testify/assert"
- "golang.org/x/sync/errgroup"
)
func TestFuzz(t *testing.T) {
@@ -42,15 +42,14 @@ func TestFuzz(t *testing.T) {
t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
}
executor := csource.BuildExecutor(t, target, "../..", "-fsanitize-coverage=trace-pc", "-g")
+
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _, opts, _ := ipcconfig.Default(target)
corpusUpdates := make(chan corpus.NewItemEvent)
fuzzer := NewFuzzer(ctx, &Config{
- Debug: true,
- BaseOpts: *opts,
- Corpus: corpus.NewMonitoredCorpus(ctx, corpusUpdates),
+ Debug: true,
+ Corpus: corpus.NewMonitoredCorpus(ctx, corpusUpdates),
Logf: func(level int, msg string, args ...interface{}) {
if level > 1 {
return
@@ -74,24 +73,24 @@ func TestFuzz(t *testing.T) {
}
}()
- tf := newTestFuzzer(t, fuzzer, map[string]bool{
- "first bug": true,
- "second bug": true,
- }, 10000)
-
- for i := 0; i < 2; i++ {
- tf.registerExecutor(newProc(t, target, executor))
+ tf := &testFuzzer{
+ t: t,
+ target: target,
+ fuzzer: fuzzer,
+ executor: executor,
+ iterLimit: 10000,
+ expectedCrashes: map[string]bool{
+ "first bug": true,
+ "second bug": true,
+ },
}
- tf.wait()
+ tf.run()
t.Logf("resulting corpus:")
for _, p := range fuzzer.Config.Corpus.Programs() {
t.Logf("-----")
t.Logf("%s", p.Serialize())
}
-
- assert.Equal(t, len(tf.expectedCrashes), len(tf.crashes),
- "not all expected crashes were found")
}
func BenchmarkFuzzer(b *testing.B) {
@@ -204,114 +203,87 @@ func emulateExec(req *queue.Request) (*queue.Result, string, error) {
type testFuzzer struct {
t testing.TB
- eg errgroup.Group
+ target *prog.Target
fuzzer *Fuzzer
+ executor string
mu sync.Mutex
crashes map[string]int
expectedCrashes map[string]bool
iter int
iterLimit int
+ done func()
+ finished atomic.Bool
+}
+
+func (f *testFuzzer) run() {
+ f.crashes = make(map[string]int)
+ ctx, done := context.WithCancel(context.Background())
+ f.done = done
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: f.target,
+ Features: flatrpc.FeatureSandboxNone,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ },
+ Procs: 4,
+ Slowdown: 1,
+ },
+ Executor: f.executor,
+ Dir: f.t.TempDir(),
+ Context: ctx,
+ }
+ cfg.MachineChecked = func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ cfg.Cover = true
+ return f
+ }
+ if err := rpcserver.RunLocal(cfg); err != nil {
+ f.t.Fatal(err)
+ }
+ assert.Equal(f.t, len(f.expectedCrashes), len(f.crashes), "not all expected crashes were found")
}
-func newTestFuzzer(t testing.TB, fuzzer *Fuzzer, expectedCrashes map[string]bool, iterLimit int) *testFuzzer {
- return &testFuzzer{
- t: t,
- fuzzer: fuzzer,
- expectedCrashes: expectedCrashes,
- crashes: map[string]int{},
- iterLimit: iterLimit,
+func (f *testFuzzer) Next() *queue.Request {
+ if f.finished.Load() {
+ return nil
}
+ req := f.fuzzer.Next()
+ req.ExecOpts.EnvFlags |= flatrpc.ExecEnvSignal | flatrpc.ExecEnvSandboxNone
+ req.ReturnOutput = true
+ req.ReturnError = true
+ req.OnDone(f.OnDone)
+ return req
}
-func (f *testFuzzer) oneMore() bool {
+func (f *testFuzzer) OnDone(req *queue.Request, res *queue.Result) bool {
+ // TODO: support hints emulation.
+ match := crashRe.FindSubmatch(res.Output)
f.mu.Lock()
defer f.mu.Unlock()
+ if match != nil {
+ crash := string(match[1])
+ f.t.Logf("CRASH: %s", crash)
+ res.Status = queue.Crashed
+ if !f.expectedCrashes[crash] {
+ f.t.Errorf("unexpected crash: %q", crash)
+ }
+ f.crashes[crash]++
+ }
f.iter++
if f.iter%100 == 0 {
f.t.Logf("<iter %d>: corpus %d, signal %d, max signal %d, crash types %d, running jobs %d",
f.iter, f.fuzzer.Config.Corpus.StatProgs.Val(), f.fuzzer.Config.Corpus.StatSignal.Val(),
len(f.fuzzer.Cover.maxSignal), len(f.crashes), f.fuzzer.statJobs.Val())
}
- return f.iter < f.iterLimit &&
- (f.expectedCrashes == nil || len(f.crashes) != len(f.expectedCrashes))
-}
-
-func (f *testFuzzer) registerExecutor(proc *executorProc) {
- f.eg.Go(func() error {
- for f.oneMore() {
- req := f.fuzzer.Next()
- res, crash, err := proc.execute(req)
- if err != nil {
- return err
- }
- if crash != "" {
- res = &queue.Result{Status: queue.Crashed}
- if !f.expectedCrashes[crash] {
- return fmt.Errorf("unexpected crash: %q", crash)
- }
- f.mu.Lock()
- f.t.Logf("CRASH: %s", crash)
- f.crashes[crash]++
- f.mu.Unlock()
- }
- req.Done(res)
- }
- return nil
- })
-}
-
-func (f *testFuzzer) wait() {
- t := f.t
- err := f.eg.Wait()
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("crashes:")
- for title, cnt := range f.crashes {
- t.Logf("%s: %d", title, cnt)
- }
-}
-
-// TODO: it's already implemented in syz-fuzzer/proc.go,
-// pkg/runtest and tools/syz-execprog.
-// Looks like it's time to factor out this functionality.
-type executorProc struct {
- env *ipc.Env
- execOpts flatrpc.ExecOpts
-}
-
-func newProc(t *testing.T, target *prog.Target, executor string) *executorProc {
- config, execOpts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- config.Executor = executor
- execOpts.EnvFlags |= flatrpc.ExecEnvSignal
- env, err := ipc.MakeEnv(config, 0)
- if err != nil {
- t.Fatal(err)
- }
- t.Cleanup(func() { env.Close() })
- return &executorProc{
- env: env,
- execOpts: *execOpts,
+ if !f.finished.Load() && (f.iter > f.iterLimit || len(f.crashes) == len(f.expectedCrashes)) {
+ f.done()
+ f.finished.Store(true)
}
+ return true
}
var crashRe = regexp.MustCompile(`{{CRASH: (.*?)}}`)
-func (proc *executorProc) execute(req *queue.Request) (*queue.Result, string, error) {
- // TODO: support hints emulation.
- output, info, _, err := proc.env.Exec(&req.ExecOpts, req.Prog)
- ret := crashRe.FindStringSubmatch(string(output))
- if ret != nil {
- return nil, ret[1], nil
- } else if err != nil {
- return nil, "", err
- }
- return &queue.Result{Info: info}, "", nil
-}
-
func checkGoroutineLeaks() {
// Inspired by src/net/http/main_test.go.
buf := make([]byte, 2<<20)
diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go
index 0f6e0309c..0268172a9 100644
--- a/pkg/fuzzer/job.go
+++ b/pkg/fuzzer/job.go
@@ -121,6 +121,7 @@ func (job *triageJob) handleCall(call int, info *triageCall) {
}
if job.flags&ProgSmashed == 0 {
job.fuzzer.startJob(job.fuzzer.statJobsSmash, &smashJob{
+ exec: job.fuzzer.smashQueue,
p: p.Clone(),
call: call,
})
@@ -240,11 +241,10 @@ func (job *triageJob) minimize(call int, info *triageCall) (*prog.Prog, int) {
}
for i := 0; i < minimizeAttempts; i++ {
result := job.execute(&queue.Request{
- Prog: p1,
- ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
- SignalFilter: info.newStableSignal,
- SignalFilterCall: call1,
- Stat: job.fuzzer.statExecMinimize,
+ Prog: p1,
+ ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
+ ReturnAllSignal: []int{call1},
+ Stat: job.fuzzer.statExecMinimize,
}, 0)
if result.Stop() {
stop = true
@@ -294,6 +294,7 @@ func getSignalAndCover(p *prog.Prog, info *flatrpc.ProgInfo, call int) signal.Si
}
type smashJob struct {
+ exec queue.Executor
p *prog.Prog
call int
}
@@ -302,6 +303,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
fuzzer.Logf(2, "smashing the program %s (call=%d):", job.p, job.call)
if fuzzer.Config.Comparisons && job.call >= 0 {
fuzzer.startJob(fuzzer.statJobsHints, &hintsJob{
+ exec: fuzzer.smashQueue,
p: job.p.Clone(),
call: job.call,
})
@@ -315,7 +317,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
fuzzer.ChoiceTable(),
fuzzer.Config.NoMutateCalls,
fuzzer.Config.Corpus.Programs())
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
Stat: fuzzer.statExecSmash,
@@ -324,7 +326,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
return
}
if fuzzer.Config.Collide {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: randomCollide(p, rnd),
Stat: fuzzer.statExecCollide,
})
@@ -366,7 +368,7 @@ func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
job.call, nth)
newProg := job.p.Clone()
newProg.Calls[job.call].Props.FailNth = nth
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: newProg,
Stat: fuzzer.statExecFaultInject,
})
@@ -382,6 +384,7 @@ func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
}
type hintsJob struct {
+ exec queue.Executor
p *prog.Prog
call int
}
@@ -393,7 +396,7 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
var comps prog.CompMap
for i := 0; i < 2; i++ {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectComps),
Stat: fuzzer.statExecSeed,
@@ -420,7 +423,7 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
// Execute each of such mutants to check if it gives new coverage.
p.MutateWithHints(job.call, comps,
func(p *prog.Prog) bool {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
Stat: fuzzer.statExecHint,
diff --git a/pkg/fuzzer/queue/queue.go b/pkg/fuzzer/queue/queue.go
index 46df9d234..051f7205e 100644
--- a/pkg/fuzzer/queue/queue.go
+++ b/pkg/fuzzer/queue/queue.go
@@ -37,7 +37,6 @@ type Request struct {
// Options needed by runtest.
BinaryFile string // If set, it's executed instead of Prog.
- Repeat int // Repeats in addition to the first run.
// Important requests will be retried even from crashed VMs.
Important bool
@@ -113,6 +112,11 @@ func (r *Request) Validate() error {
if (collectComps) && (collectSignal || collectCover) {
return fmt.Errorf("hint collection is mutually exclusive with signal/coverage")
}
+ sandboxes := flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSandboxSetuid |
+ flatrpc.ExecEnvSandboxNamespace | flatrpc.ExecEnvSandboxAndroid
+ if r.BinaryFile == "" && r.ExecOpts.EnvFlags&sandboxes == 0 {
+ return fmt.Errorf("no sandboxes set")
+ }
return nil
}
@@ -415,3 +419,24 @@ func (d *Deduplicator) onDone(req *Request, res *Result) bool {
}
return true
}
+
+// DefaultOpts applies opts to all requests in source.
+func DefaultOpts(source Source, opts flatrpc.ExecOpts) Source {
+ return &defaultOpts{source, opts}
+}
+
+type defaultOpts struct {
+ source Source
+ opts flatrpc.ExecOpts
+}
+
+func (do *defaultOpts) Next() *Request {
+ req := do.source.Next()
+ if req == nil {
+ return nil
+ }
+ req.ExecOpts.ExecFlags |= do.opts.ExecFlags
+ req.ExecOpts.EnvFlags |= do.opts.EnvFlags
+ req.ExecOpts.SandboxArg = do.opts.SandboxArg
+ return req
+}
diff --git a/pkg/host/features.go b/pkg/host/features.go
deleted file mode 100644
index 91ea0de7e..000000000
--- a/pkg/host/features.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package host
-
-import (
- "bytes"
- "fmt"
- "strings"
- "time"
-
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-// SetupFeatures enables and does any one-time setup for the requested features on the host.
-// Note: this can be called multiple times and must be idempotent.
-func SetupFeatures(target *prog.Target, executor string, mask flatrpc.Feature, flags csource.Features) (
- []*flatrpc.FeatureInfo, error) {
- if noHostChecks(target) {
- return nil, nil
- }
- var results []*flatrpc.FeatureInfo
- resultC := make(chan *flatrpc.FeatureInfo)
- for feat := range flatrpc.EnumNamesFeature {
- feat := feat
- if mask&feat == 0 {
- continue
- }
- opt := ipc.FlatRPCFeaturesToCSource[feat]
- if opt != "" && flags != nil && !flags["binfmt_misc"].Enabled {
- continue
- }
- results = append(results, nil)
- go setupFeature(executor, feat, resultC)
- }
- // Feature 0 setups common things that are not part of any feature.
- setupFeature(executor, 0, nil)
- for i := range results {
- results[i] = <-resultC
- }
- return results, nil
-}
-
-func setupFeature(executor string, feat flatrpc.Feature, resultC chan *flatrpc.FeatureInfo) {
- args := strings.Split(executor, " ")
- executor = args[0]
- args = append(args[1:], "setup", fmt.Sprint(uint64(feat)))
- output, err := osutil.RunCmd(3*time.Minute, "", executor, args...)
- log.Logf(1, "executor %v\n%s", args, bytes.ReplaceAll(output, []byte("SYZFAIL:"), nil))
- outputStr := string(output)
- if err == nil {
- outputStr = ""
- } else if outputStr == "" {
- outputStr = err.Error()
- }
- needSetup := true
- if strings.Contains(outputStr, "feature setup is not needed") {
- needSetup = false
- outputStr = ""
- }
- if resultC != nil {
- resultC <- &flatrpc.FeatureInfo{
- Id: feat,
- NeedSetup: needSetup,
- Reason: outputStr,
- }
- }
-}
-
-func noHostChecks(target *prog.Target) bool {
- // HostFuzzer targets can't run Go binaries on the targets,
- // so we actually run on the host on another OS. The same for targets.TestOS OS.
- return targets.Get(target.OS, target.Arch).HostFuzzer || target.OS == targets.TestOS
-}
diff --git a/pkg/host/machine_info.go b/pkg/host/machine_info.go
deleted file mode 100644
index c51ccf380..000000000
--- a/pkg/host/machine_info.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2020 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package host
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "github.com/google/syzkaller/pkg/flatrpc"
-)
-
-func ReadFiles(files []string) []*flatrpc.FileInfo {
- var res []*flatrpc.FileInfo
- for _, glob := range files {
- glob = filepath.FromSlash(glob)
- if !strings.Contains(glob, "*") {
- res = append(res, readFile(glob))
- continue
- }
- matches, err := filepath.Glob(glob)
- if err != nil {
- res = append(res, &flatrpc.FileInfo{
- Name: glob,
- Error: err.Error(),
- })
- continue
- }
- for _, file := range matches {
- res = append(res, readFile(file))
- }
- }
- return res
-}
-
-func readFile(file string) *flatrpc.FileInfo {
- data, err := os.ReadFile(file)
- exists, errStr := true, ""
- if err != nil {
- exists, errStr = !os.IsNotExist(err), err.Error()
- }
- return &flatrpc.FileInfo{
- Name: file,
- Exists: exists,
- Error: errStr,
- Data: data,
- }
-}
diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go
index f8ce5cb05..ff7bb9f0d 100644
--- a/pkg/instance/instance.go
+++ b/pkg/instance/instance.go
@@ -91,7 +91,7 @@ func (env *env) BuildSyzkaller(repoURL, commit string) (string, error) {
return "", fmt.Errorf("failed to checkout syzkaller repo: %w", err)
}
// The following commit ("syz-fuzzer: support optional flags") adds support for optional flags
- // in syz-fuzzer and syz-execprog. This is required to invoke older binaries with newer flags
+ // in syz-execprog. This is required to invoke older binaries with newer flags
// without failing due to unknown flags.
optionalFlags, err := repo.Contains("64435345f0891706a7e0c7885f5f7487581e6005")
if err != nil {
@@ -438,53 +438,6 @@ func (inst *inst) csourceOptions() (csource.Options, error) {
return opts, nil
}
-type OptionalFuzzerArgs struct {
- Slowdown int
- SandboxArg int64
- PprofPort int
-}
-
-type FuzzerCmdArgs struct {
- Fuzzer string
- Executor string
- Name string
- OS string
- Arch string
- FwdAddr string
- Sandbox string
- Verbosity int
- Cover bool
- Debug bool
- Optional *OptionalFuzzerArgs
-}
-
-func FuzzerCmd(args *FuzzerCmdArgs) string {
- osArg := ""
- if targets.Get(args.OS, args.Arch).HostFuzzer {
- // Only these OSes need the flag, because the rest assume host OS.
- // But speciying OS for all OSes breaks patch testing on syzbot
- // because old execprog does not have os flag.
- osArg = " -os=" + args.OS
- }
- verbosityArg := ""
- if args.Verbosity != 0 {
- verbosityArg = fmt.Sprintf(" -vv=%v", args.Verbosity)
- }
- optionalArg := ""
- if args.Optional != nil {
- flags := []tool.Flag{
- {Name: "slowdown", Value: fmt.Sprint(args.Optional.Slowdown)},
- {Name: "sandbox_arg", Value: fmt.Sprint(args.Optional.SandboxArg)},
- {Name: "pprof_port", Value: fmt.Sprint(args.Optional.PprofPort)},
- }
- optionalArg = " " + tool.OptionalFlags(flags)
- }
- return fmt.Sprintf("%v -executor=%v -name=%v -arch=%v%v -manager=%v -sandbox=%v"+
- " -cover=%v -debug=%v %v%v",
- args.Fuzzer, args.Executor, args.Name, args.Arch, osArg, args.FwdAddr, args.Sandbox,
- args.Cover, args.Debug, verbosityArg, optionalArg)
-}
-
func ExecprogCmd(execprog, executor, OS, arch, sandbox string, sandboxArg int, repeat, threaded, collide bool,
procs, faultCall, faultNth int, optionalFlags bool, slowdown int, progFile string) string {
repeatCount := 1
diff --git a/pkg/ipc/gate.go b/pkg/ipc/gate.go
deleted file mode 100644
index b1b1f1fc8..000000000
--- a/pkg/ipc/gate.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "sync"
-)
-
-// Gate limits concurrency level and window to the given value.
-// Limitation of concurrency window means that if a very old activity is still
-// running it will not let new activities to start even if concurrency level is low.
-type Gate struct {
- cv *sync.Cond
- busy []bool
- pos int
- running int
- stop bool
- f func()
-}
-
-// If f is not nil, it will be called after each batch of c activities.
-func NewGate(c int, f func()) *Gate {
- return &Gate{
- cv: sync.NewCond(new(sync.Mutex)),
- busy: make([]bool, c),
- f: f,
- }
-}
-
-func (g *Gate) Enter() int {
- g.cv.L.Lock()
- for g.busy[g.pos] || g.stop {
- g.cv.Wait()
- }
- idx := g.pos
- g.pos++
- if g.pos >= len(g.busy) {
- g.pos = 0
- }
- g.busy[idx] = true
- g.running++
- if g.running > len(g.busy) {
- panic("broken gate")
- }
- g.cv.L.Unlock()
- return idx
-}
-
-func (g *Gate) Leave(idx int) {
- g.cv.L.Lock()
- if !g.busy[idx] {
- panic("broken gate")
- }
- g.busy[idx] = false
- g.running--
- if g.running < 0 {
- panic("broken gate")
- }
- if idx == 0 && g.f != nil {
- if g.stop {
- panic("broken gate")
- }
- g.stop = true
- for g.running != 0 {
- g.cv.Wait()
- }
- g.stop = false
- g.f()
- g.cv.Broadcast()
- }
- if idx == g.pos && !g.stop || g.running == 0 && g.stop {
- g.cv.Broadcast()
- }
- g.cv.L.Unlock()
-}
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
deleted file mode 100644
index c09137e3b..000000000
--- a/pkg/ipc/ipc.go
+++ /dev/null
@@ -1,838 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "fmt"
- "io"
- "os"
- "os/exec"
- "path/filepath"
- "slices"
- "strings"
- "sync"
- "time"
- "unsafe"
-
- "github.com/google/syzkaller/pkg/cover"
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/signal"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-// Config is the configuration for Env.
-type Config struct {
- // Path to executor binary.
- Executor string
-
- UseForkServer bool // use extended protocol with handshake
- RateLimit bool // rate limit start of new processes for host fuzzer mode
-
- Timeouts targets.Timeouts
-
- CoverFilter []uint64
-}
-
-type Env struct {
- in []byte
- out []byte
-
- cmd *command
- inFile *os.File
- outFile *os.File
- bin []string
- linkedBin string
- pid int
- config *Config
-}
-
-const (
- outputSize = 16 << 20
-
- statusFail = 67
-
- // Comparison types masks taken from KCOV headers.
- compSizeMask = 6
- compSize8 = 6
- compConstMask = 1
-
- extraReplyIndex = 0xffffffff // uint32(-1)
-)
-
-func SandboxToFlags(sandbox string) (flatrpc.ExecEnv, error) {
- switch sandbox {
- case "none":
- return 0, nil
- case "setuid":
- return flatrpc.ExecEnvSandboxSetuid, nil
- case "namespace":
- return flatrpc.ExecEnvSandboxNamespace, nil
- case "android":
- return flatrpc.ExecEnvSandboxAndroid, nil
- default:
- return 0, fmt.Errorf("sandbox must contain one of none/setuid/namespace/android")
- }
-}
-
-func FlagsToSandbox(flags flatrpc.ExecEnv) string {
- if flags&flatrpc.ExecEnvSandboxSetuid != 0 {
- return "setuid"
- } else if flags&flatrpc.ExecEnvSandboxNamespace != 0 {
- return "namespace"
- } else if flags&flatrpc.ExecEnvSandboxAndroid != 0 {
- return "android"
- }
- return "none"
-}
-
-func FeaturesToFlags(features flatrpc.Feature, manual csource.Features) flatrpc.ExecEnv {
- for feat := range flatrpc.EnumNamesFeature {
- opt := FlatRPCFeaturesToCSource[feat]
- if opt != "" && manual != nil && !manual[opt].Enabled {
- features &= ^feat
- }
- }
- var flags flatrpc.ExecEnv
- if manual == nil || manual["net_reset"].Enabled {
- flags |= flatrpc.ExecEnvEnableNetReset
- }
- if manual == nil || manual["cgroups"].Enabled {
- flags |= flatrpc.ExecEnvEnableCgroups
- }
- if manual == nil || manual["close_fds"].Enabled {
- flags |= flatrpc.ExecEnvEnableCloseFds
- }
- if features&flatrpc.FeatureExtraCoverage != 0 {
- flags |= flatrpc.ExecEnvExtraCover
- }
- if features&flatrpc.FeatureDelayKcovMmap != 0 {
- flags |= flatrpc.ExecEnvDelayKcovMmap
- }
- if features&flatrpc.FeatureNetInjection != 0 {
- flags |= flatrpc.ExecEnvEnableTun
- }
- if features&flatrpc.FeatureNetDevices != 0 {
- flags |= flatrpc.ExecEnvEnableNetDev
- }
- if features&flatrpc.FeatureDevlinkPCI != 0 {
- flags |= flatrpc.ExecEnvEnableDevlinkPCI
- }
- if features&flatrpc.FeatureNicVF != 0 {
- flags |= flatrpc.ExecEnvEnableNicVF
- }
- if features&flatrpc.FeatureVhciInjection != 0 {
- flags |= flatrpc.ExecEnvEnableVhciInjection
- }
- if features&flatrpc.FeatureWifiEmulation != 0 {
- flags |= flatrpc.ExecEnvEnableWifi
- }
- return flags
-}
-
-var FlatRPCFeaturesToCSource = map[flatrpc.Feature]string{
- flatrpc.FeatureNetInjection: "tun",
- flatrpc.FeatureNetDevices: "net_dev",
- flatrpc.FeatureDevlinkPCI: "devlink_pci",
- flatrpc.FeatureNicVF: "nic_vf",
- flatrpc.FeatureVhciInjection: "vhci",
- flatrpc.FeatureWifiEmulation: "wifi",
- flatrpc.FeatureUSBEmulation: "usb",
- flatrpc.FeatureBinFmtMisc: "binfmt_misc",
- flatrpc.FeatureLRWPANEmulation: "ieee802154",
- flatrpc.FeatureSwap: "swap",
-}
-
-func MakeEnv(config *Config, pid int) (*Env, error) {
- if config.Timeouts.Slowdown == 0 || config.Timeouts.Scale == 0 ||
- config.Timeouts.Syscall == 0 || config.Timeouts.Program == 0 {
- return nil, fmt.Errorf("ipc.MakeEnv: uninitialized timeouts (%+v)", config.Timeouts)
- }
- var inf, outf *os.File
- var inmem, outmem []byte
- var err error
- inf, inmem, err = osutil.CreateMemMappedFile(prog.ExecBufferSize)
- if err != nil {
- return nil, err
- }
- defer func() {
- if inf != nil {
- osutil.CloseMemMappedFile(inf, inmem)
- }
- }()
- outf, outmem, err = osutil.CreateMemMappedFile(outputSize)
- if err != nil {
- return nil, err
- }
- defer func() {
- if outf != nil {
- osutil.CloseMemMappedFile(outf, outmem)
- }
- }()
- env := &Env{
- in: inmem,
- out: outmem,
- inFile: inf,
- outFile: outf,
- bin: append(strings.Split(config.Executor, " "), "exec"),
- pid: pid,
- config: config,
- }
- if len(env.bin) == 0 {
- return nil, fmt.Errorf("binary is empty string")
- }
- env.bin[0] = osutil.Abs(env.bin[0]) // we are going to chdir
- // Append pid to binary name.
- // E.g. if binary is 'syz-executor' and pid=15,
- // we create a link from 'syz-executor.15' to 'syz-executor' and use 'syz-executor.15' as binary.
- // This allows to easily identify program that lead to a crash in the log.
- // Log contains pid in "executing program 15" and crashes usually contain "Comm: syz-executor.15".
- // Note: pkg/report knowns about this and converts "syz-executor.15" back to "syz-executor".
- base := filepath.Base(env.bin[0])
- pidStr := fmt.Sprintf(".%v", pid)
- const maxLen = 16 // TASK_COMM_LEN is currently set to 16
- if len(base)+len(pidStr) >= maxLen {
- // Remove beginning of file name, in tests temp files have unique numbers at the end.
- base = base[len(base)+len(pidStr)-maxLen+1:]
- }
- binCopy := filepath.Join(filepath.Dir(env.bin[0]), base+pidStr)
- if err := os.Link(env.bin[0], binCopy); err == nil {
- env.bin[0] = binCopy
- env.linkedBin = binCopy
- }
- inf = nil
- outf = nil
- return env, nil
-}
-
-func (env *Env) Close() error {
- if env.cmd != nil {
- env.cmd.close()
- }
- if env.linkedBin != "" {
- os.Remove(env.linkedBin)
- }
- var err1, err2 error
- if env.inFile != nil {
- err1 = osutil.CloseMemMappedFile(env.inFile, env.in)
- }
- if env.outFile != nil {
- err2 = osutil.CloseMemMappedFile(env.outFile, env.out)
- }
- switch {
- case err1 != nil:
- return err1
- case err2 != nil:
- return err2
- default:
- return nil
- }
-}
-
-// Exec starts executor binary to execute program stored in progData in exec encoding
-// and returns information about the execution:
-// output: process output
-// info: per-call info
-// hanged: program hanged and was killed
-// err0: failed to start the process or bug in executor itself.
-func (env *Env) ExecProg(opts *flatrpc.ExecOpts, progData []byte) (
- output []byte, info *flatrpc.ProgInfo, hanged bool, err0 error) {
- ncalls, err := prog.ExecCallCount(progData)
- if err != nil {
- err0 = err
- return
- }
- // Copy-in serialized program.
- copy(env.in, progData)
- // Zero out the first two words (ncmd and nsig), so that we don't have garbage there
- // if executor crashes before writing non-garbage there.
- for i := 0; i < 4; i++ {
- env.out[i] = 0
- }
-
- err0 = env.RestartIfNeeded(opts)
- if err0 != nil {
- return
- }
-
- start := osutil.MonotonicNano()
- output, hanged, err0 = env.cmd.exec(opts)
- elapsed := osutil.MonotonicNano() - start
- if err0 != nil {
- env.cmd.close()
- env.cmd = nil
- return
- }
-
- info, err0 = env.parseOutput(opts, ncalls)
- if info != nil {
- info.Elapsed = uint64(elapsed)
- info.Freshness = env.cmd.freshness
- }
- env.cmd.freshness++
- if !env.config.UseForkServer {
- env.cmd.close()
- env.cmd = nil
- }
- return
-}
-
-func (env *Env) Exec(opts *flatrpc.ExecOpts, p *prog.Prog) (
- output []byte, info *flatrpc.ProgInfo, hanged bool, err0 error) {
- progData, err := p.SerializeForExec()
- if err != nil {
- err0 = err
- return
- }
- return env.ExecProg(opts, progData)
-}
-
-func (env *Env) ForceRestart() {
- if env.cmd != nil {
- env.cmd.close()
- env.cmd = nil
- }
-}
-
-// RestartIfNeeded brings up an executor process if it was stopped.
-func (env *Env) RestartIfNeeded(opts *flatrpc.ExecOpts) error {
- if env.cmd != nil {
- if env.cmd.flags == opts.EnvFlags && env.cmd.sandboxArg == opts.SandboxArg {
- return nil
- }
- env.ForceRestart()
- }
- if env.config.RateLimit {
- rateLimiterOnce.Do(func() {
- rateLimiter = time.NewTicker(1 * time.Second).C
- })
- <-rateLimiter
- }
- var err error
- env.cmd, err = env.makeCommand(opts, "./")
- return err
-}
-
-var (
- rateLimiterOnce sync.Once
- rateLimiter <-chan time.Time
-)
-
-func (env *Env) parseOutput(opts *flatrpc.ExecOpts, ncalls int) (*flatrpc.ProgInfo, error) {
- out := env.out
- ncmd, ok := readUint32(&out)
- if !ok {
- return nil, fmt.Errorf("failed to read number of calls")
- }
- info := flatrpc.EmptyProgInfo(ncalls)
- extraParts := make([]flatrpc.CallInfo, 0)
- for i := uint32(0); i < ncmd; i++ {
- if len(out) < int(unsafe.Sizeof(callReply{})) {
- return nil, fmt.Errorf("failed to read call %v reply", i)
- }
- reply := *(*callReply)(unsafe.Pointer(&out[0]))
- out = out[unsafe.Sizeof(callReply{}):]
- var inf *flatrpc.CallInfo
- if reply.magic != outMagic {
- return nil, fmt.Errorf("bad reply magic 0x%x", reply.magic)
- }
- if reply.index != extraReplyIndex {
- if int(reply.index) >= len(info.Calls) {
- return nil, fmt.Errorf("bad call %v index %v/%v", i, reply.index, len(info.Calls))
- }
- inf = info.Calls[reply.index]
- if inf.Flags != 0 || inf.Signal != nil {
- return nil, fmt.Errorf("duplicate reply for call %v/%v/%v", i, reply.index, reply.num)
- }
- inf.Error = int32(reply.errno)
- inf.Flags = flatrpc.CallFlag(reply.flags)
- } else {
- extraParts = append(extraParts, flatrpc.CallInfo{})
- inf = &extraParts[len(extraParts)-1]
- }
- if inf.Signal, ok = readUint64Array(&out, reply.signalSize); !ok {
- return nil, fmt.Errorf("call %v/%v/%v: signal overflow: %v/%v",
- i, reply.index, reply.num, reply.signalSize, len(out))
- }
- if inf.Cover, ok = readUint64Array(&out, reply.coverSize); !ok {
- return nil, fmt.Errorf("call %v/%v/%v: cover overflow: %v/%v",
- i, reply.index, reply.num, reply.coverSize, len(out))
- }
- comps, err := readComps(&out, reply.compsSize)
- if err != nil {
- return nil, err
- }
- inf.Comps = comps
- }
- if len(extraParts) == 0 {
- return info, nil
- }
- info.Extra = convertExtra(extraParts, opts.ExecFlags&flatrpc.ExecFlagDedupCover != 0)
- return info, nil
-}
-
-func convertExtra(extraParts []flatrpc.CallInfo, dedupCover bool) *flatrpc.CallInfo {
- var extra flatrpc.CallInfo
- if dedupCover {
- extraCover := make(cover.Cover)
- for _, part := range extraParts {
- extraCover.Merge(part.Cover)
- }
- extra.Cover = extraCover.Serialize()
- } else {
- for _, part := range extraParts {
- extra.Cover = append(extra.Cover, part.Cover...)
- }
- }
- extraSignal := make(signal.Signal)
- for _, part := range extraParts {
- extraSignal.Merge(signal.FromRaw(part.Signal, 0))
- }
- extra.Signal = make([]uint64, len(extraSignal))
- i := 0
- for s := range extraSignal {
- extra.Signal[i] = uint64(s)
- i++
- }
- return &extra
-}
-
-func readComps(outp *[]byte, compsSize uint32) ([]*flatrpc.Comparison, error) {
- comps := make([]*flatrpc.Comparison, 0, 2*compsSize)
- for i := uint32(0); i < compsSize; i++ {
- typ, ok := readUint32(outp)
- if !ok {
- return nil, fmt.Errorf("failed to read comp %v", i)
- }
- if typ > compConstMask|compSizeMask {
- return nil, fmt.Errorf("bad comp %v type %v", i, typ)
- }
- var op1, op2 uint64
- var ok1, ok2 bool
- if typ&compSizeMask == compSize8 {
- op1, ok1 = readUint64(outp)
- op2, ok2 = readUint64(outp)
- } else {
- var tmp1, tmp2 uint32
- tmp1, ok1 = readUint32(outp)
- tmp2, ok2 = readUint32(outp)
- op1, op2 = uint64(int64(int32(tmp1))), uint64(int64(int32(tmp2)))
- }
- if !ok1 || !ok2 {
- return nil, fmt.Errorf("failed to read comp %v op", i)
- }
- if op1 == op2 {
- continue // it's useless to store such comparisons
- }
- comps = append(comps, &flatrpc.Comparison{Op1: op2, Op2: op1})
- if (typ & compConstMask) != 0 {
- // If one of the operands was const, then this operand is always
- // placed first in the instrumented callbacks. Such an operand
- // could not be an argument of our syscalls (because otherwise
- // it wouldn't be const), thus we simply ignore it.
- continue
- }
- comps = append(comps, &flatrpc.Comparison{Op1: op1, Op2: op2})
- }
- return comps, nil
-}
-
-func readUint32(outp *[]byte) (uint32, bool) {
- out := *outp
- if len(out) < 4 {
- return 0, false
- }
- v := prog.HostEndian.Uint32(out)
- *outp = out[4:]
- return v, true
-}
-
-func readUint64(outp *[]byte) (uint64, bool) {
- out := *outp
- if len(out) < 8 {
- return 0, false
- }
- v := prog.HostEndian.Uint64(out)
- *outp = out[8:]
- return v, true
-}
-
-func readUint64Array(outp *[]byte, size uint32) ([]uint64, bool) {
- if size == 0 {
- return nil, true
- }
- out := *outp
- dataSize := int(size * 8)
- if dataSize > len(out) {
- return nil, false
- }
- res := unsafe.Slice((*uint64)(unsafe.Pointer(&out[0])), size)
- *outp = out[dataSize:]
- // Detach the resulting array from the original data.
- return slices.Clone(res), true
-}
-
-type command struct {
- pid int
- config *Config
- flags flatrpc.ExecEnv
- sandboxArg int64
- timeout time.Duration
- cmd *exec.Cmd
- dir string
- readDone chan []byte
- exited chan error
- inrp *os.File
- outwp *os.File
- outmem []byte
- freshness uint64
-}
-
-const (
- inMagic = uint64(0xbadc0ffeebadface)
- outMagic = uint32(0xbadf00d)
-)
-
-type handshakeReq struct {
- magic uint64
- flags uint64 // env flags
- pid uint64
- sandboxArg uint64
- coverFilterSize uint64
- // Followed by [coverFilterSize]uint64 filter.
-}
-
-type handshakeReply struct {
- magic uint32
-}
-
-type executeReq struct {
- magic uint64
- envFlags uint64 // env flags
- execFlags uint64 // exec flags
- pid uint64
- syscallTimeoutMS uint64
- programTimeoutMS uint64
- slowdownScale uint64
-}
-
-type executeReply struct {
- magic uint32
- // If done is 0, then this is call completion message followed by callReply.
- // If done is 1, then program execution is finished and status is set.
- done uint32
- status uint32
-}
-
-type callReply struct {
- magic uint32
- index uint32 // call index in the program
- num uint32 // syscall number (for cross-checking)
- errno uint32
- flags uint32 // see CallFlags
- signalSize uint32
- coverSize uint32
- compsSize uint32
- // signal/cover/comps follow
-}
-
-func (env *Env) makeCommand(opts *flatrpc.ExecOpts, tmpDir string) (*command, error) {
- dir, err := os.MkdirTemp(tmpDir, "syzkaller-testdir")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp dir: %w", err)
- }
- dir = osutil.Abs(dir)
-
- timeout := env.config.Timeouts.Program
- if env.config.UseForkServer {
- // Executor has an internal timeout and protects against most hangs when fork server is enabled,
- // so we use quite large timeout. Executor can be slow due to global locks in namespaces
- // and other things, so let's better wait than report false misleading crashes.
- timeout *= 5
- }
-
- c := &command{
- pid: env.pid,
- config: env.config,
- flags: opts.EnvFlags,
- sandboxArg: opts.SandboxArg,
- timeout: timeout,
- dir: dir,
- outmem: env.out,
- }
- defer func() {
- if c != nil {
- c.close()
- }
- }()
-
- if err := os.Chmod(dir, 0777); err != nil {
- return nil, fmt.Errorf("failed to chmod temp dir: %w", err)
- }
-
- // Output capture pipe.
- rp, wp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer wp.Close()
-
- // executor->ipc command pipe.
- inrp, inwp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer inwp.Close()
- c.inrp = inrp
-
- // ipc->executor command pipe.
- outrp, outwp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer outrp.Close()
- c.outwp = outwp
-
- c.readDone = make(chan []byte, 1)
-
- cmd := osutil.Command(env.bin[0], env.bin[1:]...)
- if env.inFile != nil && env.outFile != nil {
- cmd.ExtraFiles = []*os.File{env.inFile, env.outFile}
- }
- cmd.Dir = dir
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- cmd.Stdin = outrp
- cmd.Stdout = inwp
- if c.flags&flatrpc.ExecEnvDebug != 0 {
- close(c.readDone)
- cmd.Stderr = os.Stdout
- } else {
- cmd.Stderr = wp
- go func(c *command) {
- // Read out output in case executor constantly prints something.
- const bufSize = 128 << 10
- output := make([]byte, bufSize)
- var size uint64
- for {
- n, err := rp.Read(output[size:])
- if n > 0 {
- size += uint64(n)
- if size >= bufSize*3/4 {
- copy(output, output[size-bufSize/2:size])
- size = bufSize / 2
- }
- }
- if err != nil {
- rp.Close()
- c.readDone <- output[:size]
- close(c.readDone)
- return
- }
- }
- }(c)
- }
- if err := cmd.Start(); err != nil {
- return nil, fmt.Errorf("failed to start executor binary: %w", err)
- }
- c.exited = make(chan error, 1)
- c.cmd = cmd
- go func(c *command) {
- err := c.cmd.Wait()
- c.exited <- err
- close(c.exited)
- // Avoid a livelock if cmd.Stderr has been leaked to another alive process.
- rp.SetDeadline(time.Now().Add(5 * time.Second))
- }(c)
- wp.Close()
- // Note: we explicitly close inwp before calling handshake even though we defer it above.
- // If we don't do it and executor exits before writing handshake reply,
- // reading from inrp will hang since we hold another end of the pipe open.
- inwp.Close()
-
- if c.config.UseForkServer {
- if err := c.handshake(); err != nil {
- return nil, err
- }
- }
- tmp := c
- c = nil // disable defer above
- return tmp, nil
-}
-
-func (c *command) close() {
- if c.cmd != nil {
- c.cmd.Process.Kill()
- c.wait()
- }
- osutil.RemoveAll(c.dir)
- if c.inrp != nil {
- c.inrp.Close()
- }
- if c.outwp != nil {
- c.outwp.Close()
- }
-}
-
-// handshake sends handshakeReq and waits for handshakeReply.
-func (c *command) handshake() error {
- req := &handshakeReq{
- magic: inMagic,
- flags: uint64(c.flags),
- pid: uint64(c.pid),
- sandboxArg: uint64(c.sandboxArg),
- coverFilterSize: uint64(len(c.config.CoverFilter)),
- }
- reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:]
- if _, err := c.outwp.Write(reqData); err != nil {
- return c.handshakeError(fmt.Errorf("failed to write control pipe: %w", err))
- }
- if req.coverFilterSize != 0 {
- ptr := (*byte)(unsafe.Pointer(&c.config.CoverFilter[0]))
- size := uintptr(req.coverFilterSize) * unsafe.Sizeof(c.config.CoverFilter[0])
- coverFilter := unsafe.Slice(ptr, size)
- if _, err := c.outwp.Write(coverFilter); err != nil {
- return c.handshakeError(fmt.Errorf("failed to write control pipe: %w", err))
- }
- }
-
- read := make(chan error, 1)
- go func() {
- reply := &handshakeReply{}
- replyData := (*[unsafe.Sizeof(*reply)]byte)(unsafe.Pointer(reply))[:]
- if _, err := io.ReadFull(c.inrp, replyData); err != nil {
- read <- err
- return
- }
- if reply.magic != outMagic {
- read <- fmt.Errorf("bad handshake reply magic 0x%x", reply.magic)
- return
- }
- read <- nil
- }()
- // Sandbox setup can take significant time.
- timeout := time.NewTimer(time.Minute * c.config.Timeouts.Scale)
- select {
- case err := <-read:
- timeout.Stop()
- if err != nil {
- return c.handshakeError(err)
- }
- return nil
- case <-timeout.C:
- return c.handshakeError(fmt.Errorf("not serving"))
- }
-}
-
-func (c *command) handshakeError(err error) error {
- c.cmd.Process.Kill()
- output := <-c.readDone
- err = fmt.Errorf("executor %v: %w\n%s", c.pid, err, output)
- c.wait()
- return err
-}
-
-func (c *command) wait() error {
- return <-c.exited
-}
-
-func (c *command) exec(opts *flatrpc.ExecOpts) (output []byte, hanged bool, err0 error) {
- if c.flags != opts.EnvFlags || c.sandboxArg != opts.SandboxArg {
- panic("wrong command")
- }
- req := &executeReq{
- magic: inMagic,
- envFlags: uint64(c.flags),
- execFlags: uint64(opts.ExecFlags),
- pid: uint64(c.pid),
- syscallTimeoutMS: uint64(c.config.Timeouts.Syscall / time.Millisecond),
- programTimeoutMS: uint64(c.config.Timeouts.Program / time.Millisecond),
- slowdownScale: uint64(c.config.Timeouts.Scale),
- }
- reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:]
- if _, err := c.outwp.Write(reqData); err != nil {
- output = <-c.readDone
- err0 = fmt.Errorf("executor %v: failed to write control pipe: %w", c.pid, err)
- return
- }
- // At this point program is executing.
-
- done := make(chan bool)
- hang := make(chan bool)
- go func() {
- t := time.NewTimer(c.timeout)
- select {
- case <-t.C:
- c.cmd.Process.Kill()
- hang <- true
- case <-done:
- t.Stop()
- hang <- false
- }
- }()
- exitStatus := -1
- completedCalls := (*uint32)(unsafe.Pointer(&c.outmem[0]))
- outmem := c.outmem[4:]
- for {
- reply := &executeReply{}
- replyData := (*[unsafe.Sizeof(*reply)]byte)(unsafe.Pointer(reply))[:]
- if _, err := io.ReadFull(c.inrp, replyData); err != nil {
- break
- }
- if reply.magic != outMagic {
- fmt.Fprintf(os.Stderr, "executor %v: got bad reply magic 0x%x\n", c.pid, reply.magic)
- os.Exit(1)
- }
- if reply.done != 0 {
- exitStatus = int(reply.status)
- break
- }
- callReply := &callReply{}
- callReplyData := (*[unsafe.Sizeof(*callReply)]byte)(unsafe.Pointer(callReply))[:]
- if _, err := io.ReadFull(c.inrp, callReplyData); err != nil {
- break
- }
- if callReply.signalSize != 0 || callReply.coverSize != 0 || callReply.compsSize != 0 {
- // This is unsupported yet.
- fmt.Fprintf(os.Stderr, "executor %v: got call reply with coverage\n", c.pid)
- os.Exit(1)
- }
- copy(outmem, callReplyData)
- outmem = outmem[len(callReplyData):]
- *completedCalls++
- }
- close(done)
- if exitStatus == 0 {
- // Program was OK.
- <-hang
- return
- }
- c.cmd.Process.Kill()
- output = <-c.readDone
- err := c.wait()
- if err != nil {
- output = append(output, err.Error()...)
- output = append(output, '\n')
- }
- if <-hang {
- hanged = true
- return
- }
- if exitStatus == -1 {
- if c.cmd.ProcessState == nil {
- exitStatus = statusFail
- } else {
- exitStatus = osutil.ProcessExitStatus(c.cmd.ProcessState)
- }
- }
- // Ignore all other errors.
- // Without fork server executor can legitimately exit (program contains exit_group),
- // with fork server the top process can exit with statusFail if it wants special handling.
- if exitStatus == statusFail {
- err0 = fmt.Errorf("executor %v: exit status %d err %w\n%s", c.pid, exitStatus, err, output)
- }
- return
-}
diff --git a/pkg/ipc/ipc_priv_test.go b/pkg/ipc/ipc_priv_test.go
deleted file mode 100644
index 02c467daf..000000000
--- a/pkg/ipc/ipc_priv_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2022 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "testing"
-
- "github.com/google/syzkaller/pkg/flatrpc"
-)
-
-func TestOutputDeadline(t *testing.T) {
- // Run the command that leaks stderr to a child process.
- env := &Env{
- bin: []string{
- "sh",
- "-c",
- "exec 1>&2; ( sleep 100; echo fail ) & echo done",
- },
- pid: 1,
- config: &Config{},
- }
- c, err := env.makeCommand(&flatrpc.ExecOpts{}, t.TempDir())
- if err != nil {
- t.Fatal(err)
- }
- c.wait()
- out := <-c.readDone
- if string(out) != "done\n" {
- t.Errorf("unexpected output: '%s'", out)
- }
-}
diff --git a/pkg/ipc/ipc_test.go b/pkg/ipc/ipc_test.go
deleted file mode 100644
index c70bfe79c..000000000
--- a/pkg/ipc/ipc_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc_test
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "runtime"
- "testing"
- "time"
-
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/image"
- . "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/testutil"
- "github.com/google/syzkaller/prog"
- _ "github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/sys/targets"
-)
-
-func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, targets.Timeouts) {
- t.Parallel()
- iters := 100
- if testing.Short() {
- iters = 10
- }
- target, err := prog.GetTarget(runtime.GOOS, runtime.GOARCH)
- if err != nil {
- t.Fatal(err)
- }
- cfg, _, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- rs := testutil.RandSource(t)
- return target, rs, iters, cfg.UseForkServer, cfg.Timeouts
-}
-
-// TestExecutor runs all internal executor unit tests.
-// We do it here because we already build executor binary here.
-func TestExecutor(t *testing.T) {
- t.Parallel()
- for _, sysTarget := range targets.List[runtime.GOOS] {
- sysTarget := targets.Get(runtime.GOOS, sysTarget.Arch)
- t.Run(sysTarget.Arch, func(t *testing.T) {
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- t.Parallel()
- target, err := prog.GetTarget(runtime.GOOS, sysTarget.Arch)
- if err != nil {
- t.Fatal(err)
- }
- bin := csource.BuildExecutor(t, target, "../..")
- // qemu-user may allow us to run some cross-arch binaries.
- if _, err := osutil.RunCmd(time.Minute, "", bin, "test"); err != nil {
- if sysTarget.Arch == runtime.GOARCH || sysTarget.VMArch == runtime.GOARCH {
- t.Fatal(err)
- }
- t.Skipf("skipping, cross-arch binary failed: %v", err)
- }
- })
- }
-}
-
-func prepareTestProgram(target *prog.Target) *prog.Prog {
- p := target.DataMmapProg()
- if len(p.Calls) > 1 {
- p.Calls[1].Props.Async = true
- }
- return p
-}
-
-func TestExecute(t *testing.T) {
- target, _, _, useForkServer, timeouts := initTest(t)
-
- bin := csource.BuildExecutor(t, target, "../..")
-
- flags := []flatrpc.ExecFlag{0, flatrpc.ExecFlagThreaded}
- for _, flag := range flags {
- t.Logf("testing flags 0x%x", flag)
- cfg := &Config{
- Executor: bin,
- UseForkServer: useForkServer,
- Timeouts: timeouts,
- }
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
-
- for i := 0; i < 10; i++ {
- p := prepareTestProgram(target)
- opts := &flatrpc.ExecOpts{
- ExecFlags: flag,
- }
- output, info, hanged, err := env.Exec(opts, p)
- if err != nil {
- t.Fatalf("failed to run executor: %v", err)
- }
- if hanged {
- t.Fatalf("program hanged:\n%s", output)
- }
- if len(info.Calls) != len(p.Calls) {
- t.Fatalf("executed less calls (%v) than prog len(%v):\n%s", len(info.Calls), len(p.Calls), output)
- }
- if info.Calls[0].Error != 0 {
- t.Fatalf("simple call failed: %v\n%s", info.Calls[0].Error, output)
- }
- if len(output) != 0 {
- t.Fatalf("output on empty program")
- }
- }
- }
-}
-
-func TestParallel(t *testing.T) {
- target, _, _, useForkServer, timeouts := initTest(t)
- bin := csource.BuildExecutor(t, target, "../..")
- cfg := &Config{
- Executor: bin,
- UseForkServer: useForkServer,
- Timeouts: timeouts,
- }
- const P = 10
- errs := make(chan error, P)
- for p := 0; p < P; p++ {
- p := p
- go func() {
- env, err := MakeEnv(cfg, p)
- if err != nil {
- errs <- fmt.Errorf("failed to create env: %w", err)
- return
- }
- defer func() {
- env.Close()
- errs <- err
- }()
- p := target.DataMmapProg()
- opts := &flatrpc.ExecOpts{}
- output, info, hanged, err := env.Exec(opts, p)
- if err != nil {
- err = fmt.Errorf("failed to run executor: %w", err)
- return
- }
- if hanged {
- err = fmt.Errorf("program hanged:\n%s", output)
- return
- }
- if len(info.Calls) == 0 {
- err = fmt.Errorf("no calls executed:\n%s", output)
- return
- }
- if info.Calls[0].Error != 0 {
- err = fmt.Errorf("simple call failed: %v\n%s", info.Calls[0].Error, output)
- return
- }
- if len(output) != 0 {
- err = fmt.Errorf("output on empty program")
- return
- }
- }()
- }
- for p := 0; p < P; p++ {
- if err := <-errs; err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestZlib(t *testing.T) {
- t.Parallel()
- target, err := prog.GetTarget(targets.TestOS, targets.TestArch64)
- if err != nil {
- t.Fatal(err)
- }
- sysTarget := targets.Get(target.OS, target.Arch)
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- cfg, opts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- cfg.Executor = csource.BuildExecutor(t, target, "../..")
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
- r := rand.New(testutil.RandSource(t))
- for i := 0; i < 10; i++ {
- data := testutil.RandMountImage(r)
- compressed := image.Compress(data)
- text := fmt.Sprintf(`syz_compare_zlib(&(0x7f0000000000)="$%s", AUTO, &(0x7f0000800000)="$%s", AUTO)`,
- image.EncodeB64(data), image.EncodeB64(compressed))
- p, err := target.Deserialize([]byte(text), prog.Strict)
- if err != nil {
- t.Fatalf("failed to deserialize empty program: %v", err)
- }
- output, info, _, err := env.Exec(opts, p)
- if err != nil {
- t.Fatalf("failed to run executor: %v", err)
- }
- if info.Calls[0].Error != 0 {
- t.Fatalf("data comparison failed: %v\n%s", info.Calls[0].Error, output)
- }
- }
-}
-
-func TestExecutorCommonExt(t *testing.T) {
- target, err := prog.GetTarget("test", "64_fork")
- if err != nil {
- t.Fatal(err)
- }
- sysTarget := targets.Get(target.OS, target.Arch)
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- bin := csource.BuildExecutor(t, target, "../..", "-DSYZ_TEST_COMMON_EXT_EXAMPLE=1")
- out, err := osutil.RunCmd(time.Minute, "", bin, "setup", "0")
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Contains(out, []byte("example setup_ext called")) {
- t.Fatalf("setup_ext wasn't called:\n%s", out)
- }
-
- // The example setup_ext_test does:
- // *(uint64*)(SYZ_DATA_OFFSET + 0x1234) = 0xbadc0ffee;
- // The following program tests that that value is present at 0x1234.
- test := `syz_compare(&(0x7f0000001234)="", 0x8, &(0x7f0000000000)=@blob="eeffc0ad0b000000", AUTO)`
- p, err := target.Deserialize([]byte(test), prog.Strict)
- if err != nil {
- t.Fatal(err)
- }
- cfg, opts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- cfg.Executor = bin
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
- _, info, _, err := env.Exec(opts, p)
- if err != nil {
- t.Fatal(err)
- }
- if call := info.Calls[0]; call.Flags&flatrpc.CallFlagFinished == 0 || call.Error != 0 {
- t.Fatalf("bad call result: flags=%x errno=%v", call.Flags, call.Error)
- }
-}
diff --git a/pkg/ipc/ipcconfig/ipcconfig.go b/pkg/ipc/ipcconfig/ipcconfig.go
deleted file mode 100644
index aef709a23..000000000
--- a/pkg/ipc/ipcconfig/ipcconfig.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipcconfig
-
-import (
- "flag"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-var (
- flagExecutor = flag.String("executor", "./syz-executor", "path to executor binary")
- flagThreaded = flag.Bool("threaded", true, "use threaded mode in executor")
- flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)")
- flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)")
- flagSandboxArg = flag.Int("sandbox_arg", 0, "argument for sandbox runner to adjust it via config")
- flagDebug = flag.Bool("debug", false, "debug output from executor")
- flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
-)
-
-func Default(target *prog.Target) (*ipc.Config, *flatrpc.ExecOpts, error) {
- sysTarget := targets.Get(target.OS, target.Arch)
- c := &ipc.Config{
- Executor: *flagExecutor,
- Timeouts: sysTarget.Timeouts(*flagSlowdown),
- }
- c.UseForkServer = sysTarget.ExecutorUsesForkServer
- c.RateLimit = sysTarget.HostFuzzer && target.OS != targets.TestOS
-
- opts := &flatrpc.ExecOpts{
- ExecFlags: flatrpc.ExecFlagDedupCover,
- }
- if *flagThreaded {
- opts.ExecFlags |= flatrpc.ExecFlagThreaded
- }
- if *flagSignal {
- opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
- }
- if *flagSignal {
- opts.EnvFlags |= flatrpc.ExecEnvSignal
- }
- if *flagDebug {
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- }
- sandboxFlags, err := ipc.SandboxToFlags(*flagSandbox)
- if err != nil {
- return nil, nil, err
- }
- opts.SandboxArg = int64(*flagSandboxArg)
- opts.EnvFlags |= sandboxFlags
- return c, opts, nil
-}
diff --git a/pkg/mgrconfig/load.go b/pkg/mgrconfig/load.go
index 2bccdc7df..ba446ffa3 100644
--- a/pkg/mgrconfig/load.go
+++ b/pkg/mgrconfig/load.go
@@ -29,7 +29,6 @@ type Derived struct {
TargetVMArch string
// Full paths to binaries we are going to use:
- FuzzerBin string
ExecprogBin string
ExecutorBin string
@@ -39,7 +38,7 @@ type Derived struct {
// Special debugging/development mode specified by VM type "none".
// In this mode syz-manager does not start any VMs, but instead a user is supposed
- // to start syz-fuzzer process in a VM manually.
+ // to start syz-executor process in a VM manually.
VMLess bool
}
@@ -263,16 +262,12 @@ func (cfg *Config) completeBinaries() error {
targetBin := func(name, arch string) string {
return filepath.Join(cfg.Syzkaller, "bin", cfg.TargetOS+"_"+arch, name+exe)
}
- cfg.FuzzerBin = targetBin("syz-fuzzer", cfg.TargetVMArch)
cfg.ExecprogBin = targetBin("syz-execprog", cfg.TargetVMArch)
cfg.ExecutorBin = targetBin("syz-executor", cfg.TargetArch)
// If the target already provides an executor binary, we don't need to copy it.
if cfg.SysTarget.ExecutorBin != "" {
cfg.ExecutorBin = ""
}
- if !osutil.IsExist(cfg.FuzzerBin) {
- return fmt.Errorf("bad config syzkaller param: can't find %v", cfg.FuzzerBin)
- }
if !osutil.IsExist(cfg.ExecprogBin) {
return fmt.Errorf("bad config syzkaller param: can't find %v", cfg.ExecprogBin)
}
diff --git a/pkg/report/fuchsia.go b/pkg/report/fuchsia.go
index e5c2d72d1..51d200251 100644
--- a/pkg/report/fuchsia.go
+++ b/pkg/report/fuchsia.go
@@ -54,7 +54,7 @@ func ctorFuchsia(cfg *config) (reporterImpl, []string, error) {
ctx.obj = filepath.Join(ctx.kernelObj, ctx.target.KernelObject)
}
suppressions := []string{
- "fatal exception: process /tmp/syz-fuzzer", // OOM presumably
+ "fatal exception: process /tmp/syz-executor", // OOM presumably
}
return ctx, suppressions, nil
}
diff --git a/pkg/report/linux.go b/pkg/report/linux.go
index acf0d6f79..26cb5461f 100644
--- a/pkg/report/linux.go
+++ b/pkg/report/linux.go
@@ -126,11 +126,11 @@ func ctorLinux(cfg *config) (reporterImpl, []string, error) {
"panic: failed to create temp dir",
"fatal error: unexpected signal during runtime execution", // presubmably OOM turned into SIGBUS
"signal SIGBUS: bus error", // presubmably OOM turned into SIGBUS
- "Out of memory: Kill process .* \\(syz-fuzzer\\)",
+ "Out of memory: Kill process .* \\(syz-executor\\)",
"Out of memory: Kill process .* \\(sshd\\)",
- "Killed process .* \\(syz-fuzzer\\)",
+ "Killed process .* \\(syz-executor\\)",
"Killed process .* \\(sshd\\)",
- "lowmemorykiller: Killing 'syz-fuzzer'",
+ "lowmemorykiller: Killing 'syz-executor'",
"lowmemorykiller: Killing 'sshd'",
"INIT: PANIC: segmentation violation!",
"\\*\\*\\* stack smashing detected \\*\\*\\*: terminated",
diff --git a/pkg/report/testdata/fuchsia/report/6 b/pkg/report/testdata/fuchsia/report/6
index 44993f4a8..2813e369e 100644
--- a/pkg/report/testdata/fuchsia/report/6
+++ b/pkg/report/testdata/fuchsia/report/6
@@ -7,7 +7,7 @@ by pkg/report, so we put the panic message here.
ZIRCON KERNEL PANIC
-[00131.346] 01102.01116> <== fatal exception: process /tmp/syz-fuzzer[31717] thread pthread_t:0x1184f772cb38[61384]
+[00131.346] 01102.01116> <== fatal exception: process /tmp/syz-executor[31717] thread pthread_t:0x1184f772cb38[61384]
[00131.346] 01102.01116> <== fatal page fault, PC at 0xd8af19736ef
[00131.346] 01102.01116> CS: 0 RIP: 0xd8af19736ef EFL: 0x10246 CR2: 0x6fe5cd59a000
[00131.346] 01102.01116> RAX: 0x6fe5cd59a000 RBX: 0x6ef13ea16400 RCX: 0xd8af2070520 RDX: 0xd8af386f0a0
@@ -36,18 +36,18 @@ ZIRCON KERNEL PANIC
[00131.375] 01102.01116> dso: id=63914be467c5f24aad721d5d496d022559a0562d base=0x77ed616e3000 name=libc.so
[00131.375] 01102.01116> dso: id=48b429c1159afb653a51dd253346e51e9844197b base=0x755a0bf72000 name=<vDSO>
[00131.375] 01102.01116> dso: id=773627f59f0eab9eece83b31d05d685e001bd9f2 base=0x65ccdbd16000 name=libfdio.so
-[00131.375] 01102.01116> dso: id=1496e1863bc310a7322542c41969d8ca90d92878 base=0xd8af0615000 name=app:/tmp/syz-fuzzer
-[00131.375] 01102.01116> bt#01: pc 0xd8af19736ef sp 0x6ef13e3cf6b8 (app:/tmp/syz-fuzzer,0x135e6ef)
-[00131.375] 01102.01116> bt#02: pc 0xd8af1974ae5 sp 0x6ef13e3cf6c8 (app:/tmp/syz-fuzzer,0x135fae5)
-[00131.375] 01102.01116> bt#03: pc 0xd8af1a30ba2 sp 0x6ef13e3cf6d8 (app:/tmp/syz-fuzzer,0x141bba2)
-[00131.375] 01102.01116> bt#04: pc 0xd8af1a311c2 sp 0x6ef13e3cf6e8 (app:/tmp/syz-fuzzer,0x141c1c2)
-[00131.375] 01102.01116> bt#05: pc 0xd8af1a36209 sp 0x6ef13e3cf6f8 (app:/tmp/syz-fuzzer,0x1421209)
-[00131.375] 01102.01116> bt#06: pc 0xd8af1a2d8cc sp 0x6ef13e3cf708 (app:/tmp/syz-fuzzer,0x14188cc)
-[00131.375] 01102.01116> bt#07: pc 0xd8af1a2d9ac sp 0x6ef13e3cf718 (app:/tmp/syz-fuzzer,0x14189ac)
-[00131.375] 01102.01116> bt#08: pc 0xd8af1ccc289 sp 0x6ef13e3cf728 (app:/tmp/syz-fuzzer,0x16b7289)
-[00131.375] 01102.01116> bt#09: pc 0xd8af1cc9320 sp 0x6ef13e3cf738 (app:/tmp/syz-fuzzer,0x16b4320)
-[00131.375] 01102.01116> bt#10: pc 0xd8af1d16e5f sp 0x6ef13e3cf748 (app:/tmp/syz-fuzzer,0x1701e5f)
-[00131.375] 01102.01116> bt#11: pc 0xd8af1d1699c sp 0x6ef13e3cf758 (app:/tmp/syz-fuzzer,0x170199c)
-[00131.375] 01102.01116> bt#12: pc 0xd8af1d158bf sp 0x6ef13e3cf768 (app:/tmp/syz-fuzzer,0x17008bf)
-[00131.375] 01102.01116> bt#13: pc 0xd8af19ba1a1 sp 0x6ef13e3cf778 (app:/tmp/syz-fuzzer,0x13a51a1)
+[00131.375] 01102.01116> dso: id=1496e1863bc310a7322542c41969d8ca90d92878 base=0xd8af0615000 name=app:/tmp/syz-executor
+[00131.375] 01102.01116> bt#01: pc 0xd8af19736ef sp 0x6ef13e3cf6b8 (app:/tmp/syz-executor,0x135e6ef)
+[00131.375] 01102.01116> bt#02: pc 0xd8af1974ae5 sp 0x6ef13e3cf6c8 (app:/tmp/syz-executor,0x135fae5)
+[00131.375] 01102.01116> bt#03: pc 0xd8af1a30ba2 sp 0x6ef13e3cf6d8 (app:/tmp/syz-executor,0x141bba2)
+[00131.375] 01102.01116> bt#04: pc 0xd8af1a311c2 sp 0x6ef13e3cf6e8 (app:/tmp/syz-executor,0x141c1c2)
+[00131.375] 01102.01116> bt#05: pc 0xd8af1a36209 sp 0x6ef13e3cf6f8 (app:/tmp/syz-executor,0x1421209)
+[00131.375] 01102.01116> bt#06: pc 0xd8af1a2d8cc sp 0x6ef13e3cf708 (app:/tmp/syz-executor,0x14188cc)
+[00131.375] 01102.01116> bt#07: pc 0xd8af1a2d9ac sp 0x6ef13e3cf718 (app:/tmp/syz-executor,0x14189ac)
+[00131.375] 01102.01116> bt#08: pc 0xd8af1ccc289 sp 0x6ef13e3cf728 (app:/tmp/syz-executor,0x16b7289)
+[00131.375] 01102.01116> bt#09: pc 0xd8af1cc9320 sp 0x6ef13e3cf738 (app:/tmp/syz-executor,0x16b4320)
+[00131.375] 01102.01116> bt#10: pc 0xd8af1d16e5f sp 0x6ef13e3cf748 (app:/tmp/syz-executor,0x1701e5f)
+[00131.375] 01102.01116> bt#11: pc 0xd8af1d1699c sp 0x6ef13e3cf758 (app:/tmp/syz-executor,0x170199c)
+[00131.375] 01102.01116> bt#12: pc 0xd8af1d158bf sp 0x6ef13e3cf768 (app:/tmp/syz-executor,0x17008bf)
+[00131.375] 01102.01116> bt#13: pc 0xd8af19ba1a1 sp 0x6ef13e3cf778 (app:/tmp/syz-executor,0x13a51a1)
[00131.375] 01102.01116> bt#14: end
diff --git a/syz-manager/last_executing.go b/pkg/rpcserver/last_executing.go
index 08d2a1d88..341ae6534 100644
--- a/syz-manager/last_executing.go
+++ b/pkg/rpcserver/last_executing.go
@@ -1,7 +1,7 @@
// Copyright 2024 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-package main
+package rpcserver
import (
"sort"
@@ -17,6 +17,7 @@ type LastExecuting struct {
}
type ExecRecord struct {
+ ID int
Proc int
Prog []byte
Time time.Duration
@@ -31,9 +32,10 @@ func MakeLastExecuting(procs, count int) *LastExecuting {
}
// Note execution of the 'prog' on 'proc' at time 'now'.
-func (last *LastExecuting) Note(proc int, prog []byte, now time.Duration) {
+func (last *LastExecuting) Note(id, proc int, prog []byte, now time.Duration) {
pos := &last.positions[proc]
last.procs[proc*last.count+*pos] = ExecRecord{
+ ID: id,
Proc: proc,
Prog: prog,
Time: now,
diff --git a/pkg/rpcserver/last_executing_test.go b/pkg/rpcserver/last_executing_test.go
new file mode 100644
index 000000000..c9f3cc2bf
--- /dev/null
+++ b/pkg/rpcserver/last_executing_test.go
@@ -0,0 +1,56 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLastExecutingEmpty(t *testing.T) {
+ last := MakeLastExecuting(10, 10)
+ assert.Empty(t, last.Collect())
+}
+
+func TestLastExecuting(t *testing.T) {
+ last := MakeLastExecuting(10, 3)
+ last.Note(1, 0, []byte("prog1"), 1)
+
+ last.Note(2, 1, []byte("prog2"), 2)
+ last.Note(3, 1, []byte("prog3"), 3)
+
+ last.Note(4, 3, []byte("prog4"), 4)
+ last.Note(5, 3, []byte("prog5"), 5)
+ last.Note(6, 3, []byte("prog6"), 6)
+
+ last.Note(7, 7, []byte("prog7"), 7)
+ last.Note(8, 7, []byte("prog8"), 8)
+ last.Note(9, 7, []byte("prog9"), 9)
+ last.Note(10, 7, []byte("prog10"), 10)
+ last.Note(11, 7, []byte("prog11"), 11)
+
+ last.Note(12, 9, []byte("prog12"), 12)
+
+ last.Note(13, 8, []byte("prog13"), 13)
+
+ assert.Equal(t, last.Collect(), []ExecRecord{
+ {ID: 1, Proc: 0, Prog: []byte("prog1"), Time: 12},
+
+ {ID: 2, Proc: 1, Prog: []byte("prog2"), Time: 11},
+ {ID: 3, Proc: 1, Prog: []byte("prog3"), Time: 10},
+
+ {ID: 4, Proc: 3, Prog: []byte("prog4"), Time: 9},
+ {ID: 5, Proc: 3, Prog: []byte("prog5"), Time: 8},
+ {ID: 6, Proc: 3, Prog: []byte("prog6"), Time: 7},
+
+ {ID: 9, Proc: 7, Prog: []byte("prog9"), Time: 4},
+ {ID: 10, Proc: 7, Prog: []byte("prog10"), Time: 3},
+ {ID: 11, Proc: 7, Prog: []byte("prog11"), Time: 2},
+
+ {ID: 12, Proc: 9, Prog: []byte("prog12"), Time: 1},
+
+ {ID: 13, Proc: 8, Prog: []byte("prog13"), Time: 0},
+ })
+}
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
new file mode 100644
index 000000000..da1de1fc0
--- /dev/null
+++ b/pkg/rpcserver/local.go
@@ -0,0 +1,138 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "time"
+
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/prog"
+)
+
+type LocalConfig struct {
+ Config
+ // syz-executor binary.
+ Executor string
+ // Temp dir where to run executor process, it's up to the caller to clean it up if necessary.
+ Dir string
+ // Handle ctrl+C and exit.
+ HandleInterrupts bool
+ // Run executor under gdb.
+ GDB bool
+ // RunLocal exits when the context is cancelled.
+ Context context.Context
+ MachineChecked func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
+}
+
+func RunLocal(cfg *LocalConfig) error {
+ cfg.RPC = ":0"
+ cfg.VMLess = true
+ cfg.PrintMachineCheck = log.V(1)
+ ctx := &local{
+ cfg: cfg,
+ setupDone: make(chan bool),
+ }
+ serv, err := newImpl(&cfg.Config, ctx)
+ if err != nil {
+ return err
+ }
+ defer serv.Close()
+ ctx.serv = serv
+ // setupDone synchronizes assignment to ctx.serv and read of ctx.serv in MachineChecked
+ // for the race detector b/c it does not understand the synchronization via TCP socket connect/accept.
+ close(ctx.setupDone)
+
+ bin := cfg.Executor
+ args := []string{"runner", "local", "localhost", fmt.Sprint(serv.Port)}
+ if cfg.GDB {
+ bin = "gdb"
+ args = append([]string{
+ "--return-child-result",
+ "--ex=handle SIGPIPE nostop",
+ "--args",
+ cfg.Executor,
+ }, args...)
+ }
+ cmd := exec.Command(bin, args...)
+ cmd.Dir = cfg.Dir
+ if cfg.Debug || cfg.GDB {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ if cfg.GDB {
+ cmd.Stdin = os.Stdin
+ }
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("failed to start executor: %w", err)
+ }
+ res := make(chan error, 1)
+ go func() { res <- cmd.Wait() }()
+ shutdown := make(chan struct{})
+ if cfg.HandleInterrupts {
+ osutil.HandleInterrupts(shutdown)
+ }
+ var cmdErr error
+ select {
+ case <-shutdown:
+ case <-cfg.Context.Done():
+ case err := <-res:
+ cmdErr = fmt.Errorf("executor process exited: %w", err)
+ }
+ if cmdErr == nil {
+ cmd.Process.Kill()
+ <-res
+ }
+ if !cfg.HandleInterrupts {
+ // If the executor has crashed early, reply to all remaining requests to unblock tests.
+ loop:
+ for {
+ req := serv.execSource.Next()
+ if req == nil {
+ select {
+ case <-cfg.Context.Done():
+ break loop
+ default:
+ time.Sleep(time.Millisecond)
+ continue loop
+ }
+ }
+ req.Done(&queue.Result{Status: queue.ExecFailure, Err: errors.New("executor crashed")})
+ }
+ }
+ return cmdErr
+}
+
+type local struct {
+ cfg *LocalConfig
+ serv *Server
+ setupDone chan bool
+}
+
+func (ctx *local) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ <-ctx.setupDone
+ ctx.serv.TriagedCorpus()
+ return ctx.cfg.MachineChecked(features, syscalls)
+}
+
+func (ctx *local) BugFrames() ([]string, []string) {
+ return nil, nil
+}
+
+func (ctx *local) MaxSignal() signal.Signal {
+ return nil
+}
+
+func (ctx *local) CoverageFilter(modules []*cover.KernelModule) []uint64 {
+ return nil
+}
diff --git a/syz-manager/rpc.go b/pkg/rpcserver/rpcserver.go
index 6e7e3b710..1b090126b 100644
--- a/syz-manager/rpc.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -1,13 +1,15 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
+// Copyright 2024 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-package main
+package rpcserver
import (
"bytes"
"errors"
"fmt"
+ "maps"
"math/rand"
+ "os"
"slices"
"sort"
"strings"
@@ -16,10 +18,8 @@ import (
"time"
"github.com/google/syzkaller/pkg/cover"
- "github.com/google/syzkaller/pkg/cover/backend"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
@@ -27,14 +27,36 @@ import (
"github.com/google/syzkaller/pkg/stats"
"github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
)
-type RPCServer struct {
- mgr RPCManagerView
- cfg *mgrconfig.Config
- target *prog.Target
- checker *vminfo.Checker
- port int
+type Config struct {
+ vminfo.Config
+ RPC string
+ VMLess bool
+ PrintMachineCheck bool
+ Procs int
+ Slowdown int
+}
+
+type Manager interface {
+ MaxSignal() signal.Signal
+ BugFrames() (leaks []string, races []string)
+ MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
+ CoverageFilter(modules []*cover.KernelModule) []uint64
+}
+
+type Server struct {
+ Port int
+ StatExecs *stats.Val
+ StatNumFuzzing *stats.Val
+
+ cfg *Config
+ mgr Manager
+ serv *flatrpc.Serv
+ target *prog.Target
+ timeouts targets.Timeouts
+ checker *vminfo.Checker
infoOnce sync.Once
checkDone atomic.Bool
@@ -44,29 +66,27 @@ type RPCServer struct {
setupFeatures flatrpc.Feature
modules []*cover.KernelModule
canonicalModules *cover.Canonicalizer
- execCoverFilter []uint64 // includes both coverage and comparison PCs
- coverFilter map[uint64]struct{} // includes only coverage PCs
+ coverFilter []uint64
- mu sync.Mutex
- runners map[string]*Runner
- execSource queue.Source
- checkLeaks bool
+ mu sync.Mutex
+ runners map[string]*Runner
+ info map[string]VMState
+ execSource queue.Source
+ triagedCorpus atomic.Bool
- statNumFuzzing *stats.Val
- statExecs *stats.Val
statExecRetries *stats.Val
statExecutorRestarts *stats.Val
statExecBufferTooSmall *stats.Val
statVMRestarts *stats.Val
statNoExecRequests *stats.Val
statNoExecDuration *stats.Val
- statCoverFiltered *stats.Val
}
type Runner struct {
stopped bool
finished chan bool
injectExec chan<- bool
+ infoc chan chan []byte
conn *flatrpc.Conn
machineInfo []byte
canonicalizer *cover.CanonicalizerInstance
@@ -77,33 +97,48 @@ type Runner struct {
rnd *rand.Rand
}
-type BugFrames struct {
- memoryLeaks []string
- dataRaces []string
-}
-
-// RPCManagerView restricts interface between RPCServer and Manager.
-type RPCManagerView interface {
- currentBugFrames() BugFrames
- maxSignal() signal.Signal
- machineChecked(features flatrpc.Feature, enabledSyscalls map[*prog.Syscall]bool,
- opts flatrpc.ExecOpts) queue.Source
+func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) {
+ sandbox, err := flatrpc.SandboxToFlags(cfg.Sandbox)
+ if err != nil {
+ return nil, err
+ }
+ return newImpl(&Config{
+ Config: vminfo.Config{
+ Target: cfg.Target,
+ Features: flatrpc.AllFeatures,
+ Syscalls: cfg.Syscalls,
+ Debug: debug,
+ Cover: cfg.Cover,
+ Sandbox: sandbox,
+ SandboxArg: cfg.SandboxArg,
+ },
+ RPC: cfg.RPC,
+ VMLess: cfg.VMLess,
+ PrintMachineCheck: true,
+ Procs: cfg.Procs,
+ Slowdown: cfg.Timeouts.Slowdown,
+ }, mgr)
}
-func startRPCServer(mgr *Manager) (*RPCServer, error) {
- checker := vminfo.New(mgr.cfg)
+func newImpl(cfg *Config, mgr Manager) (*Server, error) {
+ cfg.Procs = min(cfg.Procs, prog.MaxPids)
+ checker := vminfo.New(&cfg.Config)
baseSource := queue.DynamicSource(checker)
- serv := &RPCServer{
+ serv := &Server{
+ cfg: cfg,
mgr: mgr,
- cfg: mgr.cfg,
- target: mgr.target,
+ target: cfg.Target,
+ timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown),
runners: make(map[string]*Runner),
+ info: make(map[string]VMState),
checker: checker,
baseSource: baseSource,
execSource: queue.Retry(baseSource),
- statExecs: mgr.statExecs,
- statNumFuzzing: stats.Create("fuzzing VMs", "Number of VMs that are currently fuzzing",
- stats.Console),
+
+ StatExecs: stats.Create("exec total", "Total test program executions",
+ stats.Console, stats.Rate{}, stats.Prometheus("syz_exec_total")),
+ StatNumFuzzing: stats.Create("fuzzing VMs", "Number of VMs that are currently fuzzing",
+ stats.Console, stats.Link("/vms")),
statExecRetries: stats.Create("exec retries",
"Number of times a test program was restarted because the first run failed",
stats.Rate{}, stats.Graph("executor")),
@@ -117,20 +152,77 @@ func startRPCServer(mgr *Manager) (*RPCServer, error) {
"Number of times fuzzer was stalled with no exec requests", stats.Rate{}),
statNoExecDuration: stats.Create("no exec duration",
"Total duration fuzzer was stalled with no exec requests (ns/sec)", stats.Rate{}),
- statCoverFiltered: stats.Create("filtered coverage", "", stats.NoGraph),
}
- s, err := flatrpc.ListenAndServe(mgr.cfg.RPC, serv.handleConn)
+ s, err := flatrpc.ListenAndServe(cfg.RPC, serv.handleConn)
if err != nil {
return nil, err
}
- baseSource.Store(serv.checker)
-
- log.Logf(0, "serving rpc on tcp://%v", s.Addr)
- serv.port = s.Addr.Port
+ serv.serv = s
+ serv.Port = s.Addr.Port
return serv, nil
}
-func (serv *RPCServer) handleConn(conn *flatrpc.Conn) {
+func (serv *Server) Close() error {
+ return serv.serv.Close()
+}
+
+type VMState struct {
+ State int
+ Timestamp time.Time
+}
+
+const (
+ StateOffline = iota
+ StateBooting
+ StateFuzzing
+ StateStopping
+)
+
+func (serv *Server) VMState() map[string]VMState {
+ serv.mu.Lock()
+ defer serv.mu.Unlock()
+ return maps.Clone(serv.info)
+}
+
+func (serv *Server) MachineInfo(name string) []byte {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ if runner != nil && (runner.conn == nil || runner.stopped) {
+ runner = nil
+ }
+ serv.mu.Unlock()
+ if runner == nil {
+ return []byte("VM is not alive")
+ }
+ return runner.machineInfo
+}
+
+func (serv *Server) RunnerStatus(name string) []byte {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ if runner != nil && (runner.conn == nil || runner.stopped) {
+ runner = nil
+ }
+ serv.mu.Unlock()
+ if runner == nil {
+ return []byte("VM is not alive")
+ }
+ resc := make(chan []byte, 1)
+ timeout := time.After(time.Minute)
+ select {
+ case runner.infoc <- resc:
+ case <-timeout:
+ return []byte("VM loop is not responding")
+ }
+ select {
+ case res := <-resc:
+ return res
+ case <-timeout:
+ return []byte("VM is not responding")
+ }
+}
+
+func (serv *Server) handleConn(conn *flatrpc.Conn) {
name, machineInfo, canonicalizer, err := serv.handshake(conn)
if err != nil {
log.Logf(1, "%v", err)
@@ -139,10 +231,10 @@ func (serv *RPCServer) handleConn(conn *flatrpc.Conn) {
if serv.cfg.VMLess {
// There is no VM loop, so minic what it would do.
- serv.createInstance(name, nil)
+ serv.CreateInstance(name, nil)
defer func() {
- serv.stopFuzzing(name)
- serv.shutdownInstance(name, false)
+ serv.StopFuzzing(name)
+ serv.ShutdownInstance(name, true)
}()
}
@@ -153,14 +245,14 @@ func (serv *RPCServer) handleConn(conn *flatrpc.Conn) {
log.Logf(2, "VM %v shut down before connect", name)
return
}
+ serv.info[name] = VMState{StateFuzzing, time.Now()}
runner.conn = conn
runner.machineInfo = machineInfo
runner.canonicalizer = canonicalizer
- checkLeaks := serv.checkLeaks
serv.mu.Unlock()
defer close(runner.finished)
- if checkLeaks {
+ if serv.triagedCorpus.Load() {
if err := runner.sendStartLeakChecks(); err != nil {
log.Logf(2, "%v", err)
return
@@ -171,25 +263,27 @@ func (serv *RPCServer) handleConn(conn *flatrpc.Conn) {
log.Logf(2, "runner %v: %v", name, err)
}
-func (serv *RPCServer) handshake(conn *flatrpc.Conn) (string, []byte, *cover.CanonicalizerInstance, error) {
- connectReqRaw, err := flatrpc.Recv[flatrpc.ConnectRequestRaw](conn)
+func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.CanonicalizerInstance, error) {
+ connectReq, err := flatrpc.Recv[*flatrpc.ConnectRequestRaw](conn)
if err != nil {
return "", nil, nil, err
}
- connectReq := connectReqRaw.UnPack()
- log.Logf(1, "fuzzer %v connected", connectReq.Name)
+ log.Logf(1, "runner %v connected", connectReq.Name)
if !serv.cfg.VMLess {
checkRevisions(connectReq, serv.cfg.Target)
}
serv.statVMRestarts.Add(1)
- bugFrames := serv.mgr.currentBugFrames()
+ leaks, races := serv.mgr.BugFrames()
connectReply := &flatrpc.ConnectReply{
- Debug: *flagDebug,
- Procs: int32(serv.cfg.Procs),
- Slowdown: int32(serv.cfg.Timeouts.Slowdown),
- LeakFrames: bugFrames.memoryLeaks,
- RaceFrames: bugFrames.dataRaces,
+ Debug: serv.cfg.Debug,
+ Cover: serv.cfg.Cover,
+ Procs: int32(serv.cfg.Procs),
+ Slowdown: int32(serv.timeouts.Slowdown),
+ SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond),
+ ProgramTimeoutMs: int32(serv.timeouts.Program / time.Millisecond),
+ LeakFrames: leaks,
+ RaceFrames: races,
}
connectReply.Files = serv.checker.RequiredFiles()
if serv.checkDone.Load() {
@@ -197,17 +291,16 @@ func (serv *RPCServer) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Can
} else {
connectReply.Files = append(connectReply.Files, serv.checker.CheckFiles()...)
connectReply.Globs = serv.target.RequiredGlobs()
- connectReply.Features = flatrpc.AllFeatures
+ connectReply.Features = serv.cfg.Features
}
if err := flatrpc.Send(conn, connectReply); err != nil {
return "", nil, nil, err
}
- infoReqRaw, err := flatrpc.Recv[flatrpc.InfoRequestRaw](conn)
+ infoReq, err := flatrpc.Recv[*flatrpc.InfoRequestRaw](conn)
if err != nil {
return "", nil, nil, err
}
- infoReq := infoReqRaw.UnPack()
modules, machineInfo, err := serv.checker.MachineInfo(infoReq.Files)
if err != nil {
log.Logf(0, "parsing of machine info failed: %v", err)
@@ -227,11 +320,7 @@ func (serv *RPCServer) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Can
serv.infoOnce.Do(func() {
serv.modules = modules
serv.canonicalModules = cover.NewCanonicalizer(modules, serv.cfg.Cover)
- var err error
- serv.execCoverFilter, serv.coverFilter, err = createCoverageFilter(serv.cfg, modules)
- if err != nil {
- log.Fatalf("failed to init coverage filter: %v", err)
- }
+ serv.coverFilter = serv.mgr.CoverageFilter(modules)
globs := make(map[string][]string)
for _, glob := range infoReq.Globs {
globs[glob.Name] = glob.Files
@@ -252,7 +341,7 @@ func (serv *RPCServer) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Can
canonicalizer := serv.canonicalModules.NewInstance(modules)
infoReply := &flatrpc.InfoReply{
- CoverFilter: canonicalizer.Decanonicalize(serv.execCoverFilter),
+ CoverFilter: canonicalizer.Decanonicalize(serv.coverFilter),
}
if err := flatrpc.Send(conn, infoReply); err != nil {
return "", nil, nil, err
@@ -260,9 +349,9 @@ func (serv *RPCServer) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Can
return connectReq.Name, machineInfo, canonicalizer, nil
}
-func (serv *RPCServer) connectionLoop(runner *Runner) error {
+func (serv *Server) connectionLoop(runner *Runner) error {
if serv.cfg.Cover {
- maxSignal := serv.mgr.maxSignal().ToRaw()
+ maxSignal := serv.mgr.MaxSignal().ToRaw()
for len(maxSignal) != 0 {
// Split coverage into batches to not grow the connection serialization
// buffer too much (we don't want to grow it larger than what will be needed
@@ -275,9 +364,30 @@ func (serv *RPCServer) connectionLoop(runner *Runner) error {
}
}
- serv.statNumFuzzing.Add(1)
- defer serv.statNumFuzzing.Add(-1)
+ serv.StatNumFuzzing.Add(1)
+ defer serv.StatNumFuzzing.Add(-1)
+ var infoc chan []byte
+ defer func() {
+ if infoc != nil {
+ infoc <- []byte("VM has crashed")
+ }
+ }()
for {
+ if infoc == nil {
+ select {
+ case infoc = <-runner.infoc:
+ msg := &flatrpc.HostMessage{
+ Msg: &flatrpc.HostMessages{
+ Type: flatrpc.HostMessagesRawStateRequest,
+ Value: &flatrpc.StateRequest{},
+ },
+ }
+ if err := flatrpc.Send(runner.conn, msg); err != nil {
+ return err
+ }
+ default:
+ }
+ }
for len(runner.requests)-len(runner.executing) < 2*serv.cfg.Procs {
req := serv.execSource.Next()
if req == nil {
@@ -289,23 +399,32 @@ func (serv *RPCServer) connectionLoop(runner *Runner) error {
}
if len(runner.requests) == 0 {
		// The runner has no requests at all, so don't wait to receive anything from it.
- // This is only possible during the initial checking.
time.Sleep(10 * time.Millisecond)
continue
}
- raw, err := flatrpc.Recv[flatrpc.ExecutorMessageRaw](runner.conn)
+ raw, err := flatrpc.Recv[*flatrpc.ExecutorMessageRaw](runner.conn)
if err != nil {
return err
}
- unpacked := raw.UnPack()
- if unpacked.Msg == nil || unpacked.Msg.Value == nil {
+ if raw.Msg == nil || raw.Msg.Value == nil {
return errors.New("received no message")
}
- switch msg := raw.UnPack().Msg.Value.(type) {
+ switch msg := raw.Msg.Value.(type) {
case *flatrpc.ExecutingMessage:
err = serv.handleExecutingMessage(runner, msg)
case *flatrpc.ExecResult:
err = serv.handleExecResult(runner, msg)
+ case *flatrpc.StateResult:
+ if infoc != nil {
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "pending requests on the VM:")
+ for id := range runner.requests {
+ fmt.Fprintf(buf, " %v", id)
+ }
+ fmt.Fprintf(buf, "\n\n")
+ infoc <- append(buf.Bytes(), msg.Data...)
+ infoc = nil
+ }
default:
return fmt.Errorf("received unknown message type %T", msg)
}
@@ -315,19 +434,10 @@ func (serv *RPCServer) connectionLoop(runner *Runner) error {
}
}
-func (serv *RPCServer) sendRequest(runner *Runner, req *queue.Request) error {
- if err := validateRequest(req); err != nil {
+func (serv *Server) sendRequest(runner *Runner, req *queue.Request) error {
+ if err := req.Validate(); err != nil {
panic(err)
}
- progData, err := req.Prog.SerializeForExec()
- if err != nil {
- // It's bad if we systematically fail to serialize programs,
- // but so far we don't have a better handling than counting this.
- // This error is observed a lot on the seeded syz_mount_image calls.
- serv.statExecBufferTooSmall.Add(1)
- req.Done(&queue.Result{Status: queue.ExecFailure})
- return nil
- }
runner.nextRequestID++
id := runner.nextRequestID
var flags flatrpc.RequestFlag
@@ -344,21 +454,46 @@ func (serv *RPCServer) sendRequest(runner *Runner, req *queue.Request) error {
// Do not let too much state accumulate.
const restartIn = 600
resetFlags := flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagCollectComps
- if serv.cfg.Experimental.ResetAccState || req.ExecOpts.ExecFlags&resetFlags != 0 && runner.rnd.Intn(restartIn) == 0 {
- flags |= flatrpc.RequestFlagResetState
+ opts := req.ExecOpts
+ if req.ExecOpts.ExecFlags&resetFlags != 0 && runner.rnd.Intn(restartIn) == 0 {
+ opts.EnvFlags |= flatrpc.ExecEnvResetState
+ }
+ if serv.cfg.Debug {
+ opts.EnvFlags |= flatrpc.ExecEnvDebug
+ }
+ var data []byte
+ if req.BinaryFile == "" {
+ progData, err := req.Prog.SerializeForExec()
+ if err != nil {
+ // It's bad if we systematically fail to serialize programs,
+ // but so far we don't have a better handling than counting this.
+ // This error is observed a lot on the seeded syz_mount_image calls.
+ serv.statExecBufferTooSmall.Add(1)
+ req.Done(&queue.Result{Status: queue.ExecFailure})
+ return nil
+ }
+ data = progData
+ } else {
+ flags |= flatrpc.RequestFlagIsBinary
+ fileData, err := os.ReadFile(req.BinaryFile)
+ if err != nil {
+ req.Done(&queue.Result{
+ Status: queue.ExecFailure,
+ Err: err,
+ })
+ return nil
+ }
+ data = fileData
}
- signalFilter := runner.canonicalizer.Decanonicalize(req.SignalFilter.ToRaw())
msg := &flatrpc.HostMessage{
Msg: &flatrpc.HostMessages{
Type: flatrpc.HostMessagesRawExecRequest,
Value: &flatrpc.ExecRequest{
- Id: id,
- ProgData: progData,
- Flags: flags,
- ExecOpts: &req.ExecOpts,
- SignalFilter: signalFilter,
- SignalFilterCall: int32(req.SignalFilterCall),
- AllSignal: allSignal,
+ Id: id,
+ ProgData: data,
+ Flags: flags,
+ ExecOpts: &opts,
+ AllSignal: allSignal,
},
},
}
@@ -366,7 +501,7 @@ func (serv *RPCServer) sendRequest(runner *Runner, req *queue.Request) error {
return flatrpc.Send(runner.conn, msg)
}
-func (serv *RPCServer) handleExecutingMessage(runner *Runner, msg *flatrpc.ExecutingMessage) error {
+func (serv *Server) handleExecutingMessage(runner *Runner, msg *flatrpc.ExecutingMessage) error {
req := runner.requests[msg.Id]
if req == nil {
return fmt.Errorf("can't find executing request %v", msg.Id)
@@ -375,7 +510,7 @@ func (serv *RPCServer) handleExecutingMessage(runner *Runner, msg *flatrpc.Execu
if proc < 0 || proc >= serv.cfg.Procs {
return fmt.Errorf("got bad proc id %v", proc)
}
- serv.statExecs.Add(1)
+ serv.StatExecs.Add(1)
if msg.Try == 0 {
if msg.WaitDuration != 0 {
serv.statNoExecRequests.Add(1)
@@ -386,7 +521,7 @@ func (serv *RPCServer) handleExecutingMessage(runner *Runner, msg *flatrpc.Execu
} else {
serv.statExecRetries.Add(1)
}
- runner.lastExec.Note(proc, req.Prog.Serialize(), osutil.MonotonicNano())
+ runner.lastExec.Note(int(msg.Id), proc, req.Prog.Serialize(), osutil.MonotonicNano())
select {
case runner.injectExec <- true:
default:
@@ -395,7 +530,7 @@ func (serv *RPCServer) handleExecutingMessage(runner *Runner, msg *flatrpc.Execu
return nil
}
-func (serv *RPCServer) handleExecResult(runner *Runner, msg *flatrpc.ExecResult) error {
+func (serv *Server) handleExecResult(runner *Runner, msg *flatrpc.ExecResult) error {
req := runner.requests[msg.Id]
if req == nil {
return fmt.Errorf("can't find executed request %v", msg.Id)
@@ -403,6 +538,12 @@ func (serv *RPCServer) handleExecResult(runner *Runner, msg *flatrpc.ExecResult)
delete(runner.requests, msg.Id)
delete(runner.executing, msg.Id)
if msg.Info != nil {
+ for len(msg.Info.Calls) < len(req.Prog.Calls) {
+ msg.Info.Calls = append(msg.Info.Calls, &flatrpc.CallInfo{
+ Error: 999,
+ })
+ }
+ msg.Info.Calls = msg.Info.Calls[:len(req.Prog.Calls)]
if msg.Info.Freshness == 0 {
serv.statExecutorRestarts.Add(1)
}
@@ -415,9 +556,17 @@ func (serv *RPCServer) handleExecResult(runner *Runner, msg *flatrpc.ExecResult)
call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
}
- if msg.Info.Extra != nil {
+ if len(msg.Info.ExtraRaw) != 0 {
+ msg.Info.Extra = msg.Info.ExtraRaw[0]
+ for _, info := range msg.Info.ExtraRaw[1:] {
+ // All processing in the fuzzer later will convert signal/cover to maps and dedup,
+ // so there is little point in deduping here.
+ msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...)
+ msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...)
+ }
msg.Info.Extra.Cover = runner.canonicalizer.Canonicalize(msg.Info.Extra.Cover)
msg.Info.Extra.Signal = runner.canonicalizer.Canonicalize(msg.Info.Extra.Signal)
+ msg.Info.ExtraRaw = nil
}
}
status := queue.Success
@@ -437,25 +586,41 @@ func (serv *RPCServer) handleExecResult(runner *Runner, msg *flatrpc.ExecResult)
func checkRevisions(a *flatrpc.ConnectRequest, target *prog.Target) {
if target.Arch != a.Arch {
- log.Fatalf("mismatching target/executor arches: %v vs %v", target.Arch, a.Arch)
+ log.Fatalf("mismatching manager/executor arches: %v vs %v", target.Arch, a.Arch)
}
if prog.GitRevision != a.GitRevision {
- log.Fatalf("mismatching manager/fuzzer git revisions: %v vs %v",
+ log.Fatalf("mismatching manager/executor git revisions: %v vs %v",
prog.GitRevision, a.GitRevision)
}
if target.Revision != a.SyzRevision {
- log.Fatalf("mismatching manager/fuzzer system call descriptions: %v vs %v",
+ log.Fatalf("mismatching manager/executor system call descriptions: %v vs %v",
target.Revision, a.SyzRevision)
}
}
-func (serv *RPCServer) runCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeatureInfo []*flatrpc.FeatureInfo) error {
+func (serv *Server) runCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeatureInfo []*flatrpc.FeatureInfo) error {
enabledCalls, disabledCalls, features, checkErr := serv.checker.Run(checkFilesInfo, checkFeatureInfo)
enabledCalls, transitivelyDisabled := serv.target.TransitivelyEnabledCalls(enabledCalls)
 	// Note: need to print disabled syscalls before failing due to an error.
// This helps to debug "all system calls are disabled".
+ if serv.cfg.PrintMachineCheck {
+ serv.printMachineCheck(checkFilesInfo, enabledCalls, disabledCalls, transitivelyDisabled, features)
+ }
+ if checkErr != nil {
+ return checkErr
+ }
+ serv.enabledFeatures = features.Enabled()
+ serv.setupFeatures = features.NeedSetup()
+ newSource := serv.mgr.MachineChecked(serv.enabledFeatures, enabledCalls)
+ serv.baseSource.Store(newSource)
+ serv.checkDone.Store(true)
+ return nil
+}
+
+func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enabledCalls map[*prog.Syscall]bool,
+ disabledCalls, transitivelyDisabled map[*prog.Syscall]string, features vminfo.Features) {
buf := new(bytes.Buffer)
- if len(serv.cfg.EnabledSyscalls) != 0 || log.V(1) {
+ if len(serv.cfg.Syscalls) != 0 || log.V(1) {
if len(disabledCalls) != 0 {
var lines []string
for call, reason := range disabledCalls {
@@ -500,35 +665,12 @@ func (serv *RPCServer) runCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeature
buf.WriteString(strings.Join(lines, ""))
fmt.Fprintf(buf, "\n")
log.Logf(0, "machine check:\n%s", buf.Bytes())
- if checkErr != nil {
- return checkErr
- }
- if len(enabledCalls) == 0 {
- return fmt.Errorf("all system calls are disabled")
- }
- serv.enabledFeatures = features.Enabled()
- serv.setupFeatures = features.NeedSetup()
- newSource := serv.mgr.machineChecked(serv.enabledFeatures, enabledCalls, serv.execOpts())
- serv.baseSource.Store(newSource)
- serv.checkDone.Store(true)
- return nil
}
-func validateRequest(req *queue.Request) error {
- err := req.Validate()
- if err != nil {
- return err
- }
- if req.BinaryFile != "" {
- // Currnetly it should only be done in tools/syz-runtest.
- return fmt.Errorf("binary file execution is not supported")
- }
- return nil
-}
-
-func (serv *RPCServer) createInstance(name string, injectExec chan<- bool) {
+func (serv *Server) CreateInstance(name string, injectExec chan<- bool) {
runner := &Runner{
injectExec: injectExec,
+ infoc: make(chan chan []byte),
finished: make(chan bool),
requests: make(map[int64]*queue.Request),
executing: make(map[int64]bool),
@@ -540,26 +682,29 @@ func (serv *RPCServer) createInstance(name string, injectExec chan<- bool) {
panic(fmt.Sprintf("duplicate instance %s", name))
}
serv.runners[name] = runner
+ serv.info[name] = VMState{StateBooting, time.Now()}
serv.mu.Unlock()
}
// stopInstance prevents further request exchange requests.
// To make RPCServer fully forget an instance, shutdownInstance() must be called.
-func (serv *RPCServer) stopFuzzing(name string) {
+func (serv *Server) StopFuzzing(name string) {
serv.mu.Lock()
runner := serv.runners[name]
runner.stopped = true
conn := runner.conn
+ serv.info[name] = VMState{StateStopping, time.Now()}
serv.mu.Unlock()
if conn != nil {
conn.Close()
}
}
-func (serv *RPCServer) shutdownInstance(name string, crashed bool) ([]ExecRecord, []byte) {
+func (serv *Server) ShutdownInstance(name string, crashed bool) ([]ExecRecord, []byte) {
serv.mu.Lock()
runner := serv.runners[name]
delete(serv.runners, name)
+ serv.info[name] = VMState{StateOffline, time.Now()}
serv.mu.Unlock()
if runner.conn != nil {
// Wait for the connection goroutine to finish and stop touching data.
@@ -576,7 +721,7 @@ func (serv *RPCServer) shutdownInstance(name string, crashed bool) ([]ExecRecord
return runner.lastExec.Collect(), runner.machineInfo
}
-func (serv *RPCServer) distributeSignalDelta(plus, minus signal.Signal) {
+func (serv *Server) DistributeSignalDelta(plus, minus signal.Signal) {
plusRaw := plus.ToRaw()
minusRaw := minus.ToRaw()
serv.foreachRunnerAsync(func(runner *Runner) {
@@ -597,7 +742,8 @@ func (runner *Runner) sendSignalUpdate(plus, minus []uint64) error {
return flatrpc.Send(runner.conn, msg)
}
-func (serv *RPCServer) startLeakChecking() {
+func (serv *Server) TriagedCorpus() {
+ serv.triagedCorpus.Store(true)
serv.foreachRunnerAsync(func(runner *Runner) {
runner.sendStartLeakChecks()
})
@@ -616,7 +762,7 @@ func (runner *Runner) sendStartLeakChecks() error {
// foreachRunnerAsync runs callback fn for each connected runner asynchronously.
// If a VM has hanged w/o reading out the socket, we want to avoid blocking
// important goroutines on the send operations.
-func (serv *RPCServer) foreachRunnerAsync(fn func(runner *Runner)) {
+func (serv *Server) foreachRunnerAsync(fn func(runner *Runner)) {
serv.mu.Lock()
defer serv.mu.Unlock()
for _, runner := range serv.runners {
@@ -626,45 +772,6 @@ func (serv *RPCServer) foreachRunnerAsync(fn func(runner *Runner)) {
}
}
-func (serv *RPCServer) updateCoverFilter(newCover []uint64) {
- if len(newCover) == 0 || serv.coverFilter == nil {
- return
- }
- filtered := 0
- for _, pc := range newCover {
- pc = backend.PreviousInstructionPC(serv.cfg.SysTarget, serv.cfg.Type, pc)
- if _, ok := serv.coverFilter[pc]; ok {
- filtered++
- }
- }
- serv.statCoverFiltered.Add(filtered)
-}
-
-func (serv *RPCServer) execOpts() flatrpc.ExecOpts {
- env := ipc.FeaturesToFlags(serv.enabledFeatures, nil)
- if *flagDebug {
- env |= flatrpc.ExecEnvDebug
- }
- if serv.cfg.Cover {
- env |= flatrpc.ExecEnvSignal
- }
- sandbox, err := ipc.SandboxToFlags(serv.cfg.Sandbox)
- if err != nil {
- panic(fmt.Sprintf("failed to parse sandbox: %v", err))
- }
- env |= sandbox
-
- exec := flatrpc.ExecFlagThreaded
- if !serv.cfg.RawCover {
- exec |= flatrpc.ExecFlagDedupCover
- }
- return flatrpc.ExecOpts{
- EnvFlags: env,
- ExecFlags: exec,
- SandboxArg: serv.cfg.SandboxArg,
- }
-}
-
// addFallbackSignal computes simple fallback signal in cases we don't have real coverage signal.
// We use syscall number or-ed with returned errno value as signal.
// At least this gives us all combinations of syscall+errno.
diff --git a/pkg/runtest/executor_test.go b/pkg/runtest/executor_test.go
new file mode 100644
index 000000000..d6f9a8434
--- /dev/null
+++ b/pkg/runtest/executor_test.go
@@ -0,0 +1,131 @@
+// Copyright 2015 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package runtest
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/pkg/csource"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/image"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/prog"
+ _ "github.com/google/syzkaller/sys"
+ "github.com/google/syzkaller/sys/targets"
+)
+
+// TestExecutor runs all internal executor unit tests.
+// We do it here because we already build executor binary here.
+func TestExecutor(t *testing.T) {
+ t.Parallel()
+ for _, sysTarget := range targets.List[runtime.GOOS] {
+ sysTarget := targets.Get(runtime.GOOS, sysTarget.Arch)
+ t.Run(sysTarget.Arch, func(t *testing.T) {
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ t.Parallel()
+ target, err := prog.GetTarget(runtime.GOOS, sysTarget.Arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bin := csource.BuildExecutor(t, target, "../..")
+ // qemu-user may allow us to run some cross-arch binaries.
+ if _, err := osutil.RunCmd(time.Minute, "", bin, "test"); err != nil {
+ if sysTarget.Arch == runtime.GOARCH || sysTarget.VMArch == runtime.GOARCH {
+ t.Fatal(err)
+ }
+ t.Skipf("skipping, cross-arch binary failed: %v", err)
+ }
+ })
+ }
+}
+
+func TestZlib(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget(targets.TestOS, targets.TestArch64)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ r := rand.New(testutil.RandSource(t))
+ for i := 0; i < 10; i++ {
+ data := testutil.RandMountImage(r)
+ compressed := image.Compress(data)
+ text := fmt.Sprintf(`syz_compare_zlib(&(0x7f0000000000)="$%s", AUTO, &(0x7f0000800000)="$%s", AUTO)`,
+ image.EncodeB64(data), image.EncodeB64(compressed))
+ p, err := target.Deserialize([]byte(text), prog.Strict)
+ if err != nil {
+ t.Fatalf("failed to deserialize empty program: %v", err)
+ }
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if res.Info.Calls[0].Error != 0 {
+ t.Fatalf("data comparison failed: %v\n%s", res.Info.Calls[0].Error, res.Output)
+ }
+ }
+}
+
+func TestExecutorCommonExt(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget("test", "64_fork")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..", "-DSYZ_TEST_COMMON_EXT_EXAMPLE=1")
+ // The example setup_ext_test does:
+ // *(uint64*)(SYZ_DATA_OFFSET + 0x1234) = 0xbadc0ffee;
+ // The following program tests that that value is present at 0x1234.
+ test := `syz_compare(&(0x7f0000001234)="", 0x8, &(0x7f0000000000)=@blob="eeffc0ad0b000000", AUTO)`
+ p, err := target.Deserialize([]byte(test), prog.Strict)
+ if err != nil {
+ t.Fatal(err)
+ }
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if call := res.Info.Calls[0]; call.Flags&flatrpc.CallFlagFinished == 0 || call.Error != 0 {
+ t.Fatalf("bad call result: flags=%x errno=%v", call.Flags, call.Error)
+ }
+}
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index eb57582a3..cef85f6e9 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -22,25 +22,25 @@ import (
"sort"
"strconv"
"strings"
- "sync"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
- "golang.org/x/sync/errgroup"
)
type runRequest struct {
*queue.Request
+ sourceOpts *csource.Options
+ executor queue.Executor
ok int
failed int
err error
result *queue.Result
results *flatrpc.ProgInfo // the expected results
+ repeat int // only relevant for C tests
name string
broken string
@@ -58,7 +58,9 @@ type Context struct {
Debug bool
Tests string // prefix to match test file names
- executor queue.PlainQueue
+ executor *queue.DynamicOrderer
+ requests []*runRequest
+ buildSem chan bool
}
func (ctx *Context) log(msg string, args ...interface{}) {
@@ -66,87 +68,11 @@ func (ctx *Context) log(msg string, args ...interface{}) {
}
func (ctx *Context) Run() error {
- if ctx.Retries%2 == 0 {
- ctx.Retries++
- }
- progs := make(chan *runRequest, 1000)
- var eg errgroup.Group
- eg.Go(func() error {
- defer close(progs)
- return ctx.generatePrograms(progs)
- })
- done := make(chan *runRequest)
- eg.Go(func() error {
- return ctx.processResults(done)
- })
-
- var wg sync.WaitGroup
- for req := range progs {
- req := req
- if req.broken != "" || req.skip != "" {
- done <- req
- continue
- }
- var retry queue.DoneCallback
- retry = func(_ *queue.Request, res *queue.Result) bool {
- // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
- // We don't want to fix this by significantly bumping all timeouts,
- // because if a program fails all the time with the default timeouts,
- // it will also fail during fuzzing. And we want to ensure that it's not the case.
- // So what we want is to tolerate episodic failures with the default timeouts.
- // To achieve this we run each test several times and ensure that it passes
- // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
- // In the best case this allows to get off with just 1 test run.
-
- if res.Err != nil {
- req.err = res.Err
- return true
- }
- req.result = res
- err := checkResult(req)
- if err == nil {
- req.ok++
- } else {
- req.failed++
- req.err = err
- }
- if req.ok > req.failed {
- // There are more successful than failed runs.
- req.err = nil
- return true
- }
- // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
- // which simplifies to `failed * 2 + 1`.
- if req.failed*2+1 <= ctx.Retries {
- // We can still retry the execution.
- req.OnDone(retry)
- ctx.executor.Submit(req.Request)
- return false
- }
- // Give up and fail on this request.
- return true
- }
- req.Request.OnDone(retry)
- ctx.executor.Submit(req.Request)
- wg.Add(1)
- go func() {
- defer wg.Done()
- req.Request.Wait(context.Background())
- done <- req
- }()
- }
- wg.Wait()
- close(done)
- return eg.Wait()
-}
-
-func (ctx *Context) Next() *queue.Request {
- return ctx.executor.Next()
-}
-
-func (ctx *Context) processResults(requests chan *runRequest) error {
+ ctx.buildSem = make(chan bool, runtime.GOMAXPROCS(0))
+ ctx.executor = queue.DynamicOrder()
+ ctx.generatePrograms()
var ok, fail, broken, skip int
- for req := range requests {
+ for _, req := range ctx.requests {
result := ""
verbose := false
if req.broken != "" {
@@ -158,14 +84,14 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
result = fmt.Sprintf("SKIP (%v)", req.skip)
verbose = true
} else {
+ req.Request.Wait(context.Background())
if req.err != nil {
fail++
result = fmt.Sprintf("FAIL: %v",
strings.Replace(req.err.Error(), "\n", "\n\t", -1))
- res := req.result
- if len(res.Output) != 0 {
+ if req.result != nil && len(req.result.Output) != 0 {
result += fmt.Sprintf("\n\t%s",
- strings.Replace(string(res.Output), "\n", "\n\t", -1))
+ strings.Replace(string(req.result.Output), "\n", "\n\t", -1))
}
} else {
ok++
@@ -186,7 +112,52 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
return nil
}
-func (ctx *Context) generatePrograms(progs chan *runRequest) error {
+func (ctx *Context) Next() *queue.Request {
+ return ctx.executor.Next()
+}
+
+func (ctx *Context) onDone(req *runRequest, res *queue.Result) bool {
+ // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
+ // We don't want to fix this by significantly bumping all timeouts,
+ // because if a program fails all the time with the default timeouts,
+ // it will also fail during fuzzing. And we want to ensure that it's not the case.
+ // So what we want is to tolerate episodic failures with the default timeouts.
+ // To achieve this we run each test several times and ensure that it passes
+ // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
+ // In the best case this allows to get off with just 1 test run.
+ if res.Err != nil {
+ req.err = res.Err
+ return true
+ }
+ req.result = res
+ err := checkResult(req)
+ if err == nil {
+ req.ok++
+ } else {
+ req.failed++
+ req.err = err
+ }
+ if req.ok > req.failed {
+ // There are more successful than failed runs.
+ req.err = nil
+ return true
+ }
+ // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
+ // which simplifies to `failed * 2 + 1`.
+ retries := ctx.Retries
+ if retries%2 == 0 {
+ retries++
+ }
+ if req.failed*2+1 <= retries {
+ // We can still retry the execution.
+ ctx.submit(req)
+ return false
+ }
+ // Give up and fail on this request.
+ return true
+}
+
+func (ctx *Context) generatePrograms() error {
cover := []bool{false}
if ctx.Features&flatrpc.FeatureCoverage != 0 {
cover = append(cover, true)
@@ -201,7 +172,7 @@ func (ctx *Context) generatePrograms(progs chan *runRequest) error {
return err
}
for _, file := range files {
- if err := ctx.generateFile(progs, sandboxes, cover, file); err != nil {
+ if err := ctx.generateFile(sandboxes, cover, file); err != nil {
return err
}
}
@@ -225,7 +196,7 @@ func progFileList(dir, filter string) ([]string, error) {
return res, nil
}
-func (ctx *Context) generateFile(progs chan *runRequest, sandboxes []string, cover []bool, filename string) error {
+func (ctx *Context) generateFile(sandboxes []string, cover []bool, filename string) error {
p, requires, results, err := parseProg(ctx.Target, ctx.Dir, filename)
if err != nil {
return err
@@ -239,10 +210,10 @@ nextSandbox:
name := fmt.Sprintf("%v %v", filename, sandbox)
for _, call := range p.Calls {
if !ctx.EnabledCalls[sandbox][call.Meta] {
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
skip: fmt.Sprintf("unsupported call %v", call.Meta.Name),
- }
+ })
continue nextSandbox
}
}
@@ -267,6 +238,9 @@ nextSandbox:
if sandbox == "" {
break // executor does not support empty sandbox
}
+ if times != 1 {
+ break
+ }
name := name
if cov {
name += "/cover"
@@ -274,11 +248,11 @@ nextSandbox:
properties["cover"] = cov
properties["C"] = false
properties["executor"] = true
- req, err := ctx.createSyzTest(p, sandbox, threaded, cov, times)
+ req, err := ctx.createSyzTest(p, sandbox, threaded, cov)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
if sysTarget.HostFuzzer {
// For HostFuzzer mode, we need to cross-compile
@@ -291,17 +265,17 @@ nextSandbox:
name += " C"
if !sysTarget.ExecutorUsesForkServer && times > 1 {
// Non-fork loop implementation does not support repetition.
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
broken: "non-forking loop",
- }
+ })
continue
}
req, err := ctx.createCTest(p, sandbox, threaded, times)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
}
}
@@ -405,14 +379,52 @@ func checkArch(requires map[string]bool, arch string) bool {
return true
}
-func (ctx *Context) produceTest(progs chan *runRequest, req *runRequest, name string,
- properties, requires map[string]bool, results *flatrpc.ProgInfo) {
+func (ctx *Context) produceTest(req *runRequest, name string, properties,
+ requires map[string]bool, results *flatrpc.ProgInfo) {
req.name = name
req.results = results
if !match(properties, requires) {
req.skip = "excluded by constraints"
}
- progs <- req
+ ctx.createTest(req)
+}
+
+func (ctx *Context) createTest(req *runRequest) {
+ req.executor = ctx.executor.Append()
+ ctx.requests = append(ctx.requests, req)
+ if req.skip != "" || req.broken != "" {
+ return
+ }
+ if req.sourceOpts == nil {
+ ctx.submit(req)
+ return
+ }
+ go func() {
+ ctx.buildSem <- true
+ defer func() {
+ <-ctx.buildSem
+ }()
+ src, err := csource.Write(req.Prog, *req.sourceOpts)
+ if err != nil {
+ req.err = fmt.Errorf("failed to create C source: %w", err)
+ req.Request.Done(&queue.Result{})
+ }
+ bin, err := csource.Build(ctx.Target, src)
+ if err != nil {
+ req.err = fmt.Errorf("failed to build C program: %w", err)
+ req.Request.Done(&queue.Result{})
+ return
+ }
+ req.BinaryFile = bin
+ ctx.submit(req)
+ }()
+}
+
+func (ctx *Context) submit(req *runRequest) {
+ req.OnDone(func(_ *queue.Request, res *queue.Result) bool {
+ return ctx.onDone(req, res)
+ })
+ req.executor.Submit(req.Request)
}
func match(props, requires map[string]bool) bool {
@@ -436,9 +448,9 @@ func match(props, requires map[string]bool) bool {
return true
}
-func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool, times int) (*runRequest, error) {
+func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool) (*runRequest, error) {
var opts flatrpc.ExecOpts
- sandboxFlags, err := ipc.SandboxToFlags(sandbox)
+ sandboxFlags, err := flatrpc.SandboxToFlags(sandbox)
if err != nil {
return nil, err
}
@@ -451,7 +463,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
opts.ExecFlags |= flatrpc.ExecFlagCollectCover
}
- opts.EnvFlags |= ipc.FeaturesToFlags(ctx.Features, nil)
+ opts.EnvFlags |= csource.FeaturesToFlags(ctx.Features, nil)
if ctx.Debug {
opts.EnvFlags |= flatrpc.ExecEnvDebug
}
@@ -459,7 +471,6 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
Request: &queue.Request{
Prog: p,
ExecOpts: opts,
- Repeat: times,
},
}
return req, nil
@@ -496,27 +507,19 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
opts.IEEE802154 = true
}
}
- src, err := csource.Write(p, opts)
- if err != nil {
- return nil, fmt.Errorf("failed to create C source: %w", err)
- }
- bin, err := csource.Build(p.Target, src)
- if err != nil {
- return nil, fmt.Errorf("failed to build C program: %w", err)
- }
var ipcFlags flatrpc.ExecFlag
if threaded {
ipcFlags |= flatrpc.ExecFlagThreaded
}
req := &runRequest{
+ sourceOpts: &opts,
Request: &queue.Request{
- Prog: p,
- BinaryFile: bin,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
ExecFlags: ipcFlags,
},
- Repeat: times,
},
+ repeat: times,
}
return req, nil
}
@@ -525,27 +528,17 @@ func checkResult(req *runRequest) error {
if req.result.Status != queue.Success {
return fmt.Errorf("non-successful result status (%v)", req.result.Status)
}
- var infos []*flatrpc.ProgInfo
+ infos := []*flatrpc.ProgInfo{req.result.Info}
isC := req.BinaryFile != ""
if isC {
var err error
if infos, err = parseBinOutput(req); err != nil {
return err
}
- } else {
- raw := req.result.Info
- for len(raw.Calls) != 0 {
- ncalls := min(len(raw.Calls), len(req.Prog.Calls))
- infos = append(infos, &flatrpc.ProgInfo{
- Extra: raw.Extra,
- Calls: raw.Calls[:ncalls],
- })
- raw.Calls = raw.Calls[ncalls:]
- }
- }
- if req.Repeat != len(infos) {
- return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
- req.Repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ if req.repeat != len(infos) {
+ return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
+ req.repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ }
}
calls := make(map[string]bool)
for run, info := range infos {
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index f04ad4b0f..92b6c2d77 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -8,41 +8,41 @@ import (
"context"
"encoding/binary"
"encoding/hex"
- "errors"
"flag"
"fmt"
- "os"
"path/filepath"
"runtime"
"testing"
- "time"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
_ "github.com/google/syzkaller/sys/test/gen" // pull in the test target
"github.com/stretchr/testify/assert"
)
-// Can be used as:
-// go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
-// to select a subset of tests to run.
-var flagFilter = flag.String("filter", "", "prefix to match test file names")
-
-var flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+var (
+ // Can be used as:
+ // go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
+ // to select a subset of tests to run.
+ flagFilter = flag.String("filter", "", "prefix to match test file names")
+ flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+ flagGDB = flag.Bool("gdb", false, "run executor under gdb")
+)
-func Test(t *testing.T) {
+func TestUnit(t *testing.T) {
switch runtime.GOOS {
case targets.OpenBSD:
t.Skipf("broken on %v", runtime.GOOS)
}
// Test only one target in short mode (each takes 5+ seconds to run).
- shortTarget := targets.Get(targets.TestOS, targets.TestArch64)
+ shortTarget := targets.Get(targets.TestOS, targets.TestArch64Fork)
for _, sysTarget := range targets.List[targets.TestOS] {
if testing.Short() && sysTarget != shortTarget {
continue
@@ -83,27 +83,7 @@ func test(t *testing.T, sysTarget *targets.Target) {
Verbose: true,
Debug: *flagDebug,
}
-
- executorCtx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
- go func() {
- for {
- select {
- case <-time.After(time.Millisecond):
- case <-executorCtx.Done():
- return
- }
- req := ctx.Next()
- if req == nil {
- continue
- }
- if req.BinaryFile != "" {
- req.Done(runTestC(req))
- } else {
- req.Done(runTest(req, executor))
- }
- }
- }()
+ startRpcserver(t, target, executor, ctx)
if err := ctx.Run(); err != nil {
t.Fatal(err)
}
@@ -114,7 +94,7 @@ func TestCover(t *testing.T) {
// We inject given blobs into KCOV buffer using syz_inject_cover,
// and then test what we get back.
t.Parallel()
- for _, arch := range []string{targets.TestArch32, targets.TestArch64} {
+ for _, arch := range []string{targets.TestArch32, targets.TestArch64, targets.TestArch64Fork} {
sysTarget := targets.Get(targets.TestOS, arch)
t.Run(arch, func(t *testing.T) {
if sysTarget.BrokenCompiler != "" {
@@ -202,15 +182,15 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover,
- Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
- 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011},
+ Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000033, 0xc0dec0dec0000022,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
},
{
Is64Bit: 1,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
- Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
+ Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011},
},
// Signal hashing.
{
@@ -218,8 +198,8 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
Flags: flatrpc.ExecFlagCollectSignal,
- Signal: []uint64{0xc0dec0dec0011001, 0xc0dec0dec0022003, 0xc0dec0dec00330f2,
- 0xc0dec0dec0044bf0, 0xc0dec0dec0011b01},
+ Signal: []uint64{0xc0dec0dec0011b01, 0xc0dec0dec0044bf0, 0xc0dec0dec00330f2,
+ 0xc0dec0dec0022003, 0xc0dec0dec0011001},
},
// Invalid non-kernel PCs must fail test execution.
{
@@ -296,38 +276,49 @@ func testCover(t *testing.T, target *prog.Target) {
// TODO: test max signal filtering and cover filter when syz-executor handles them.
}
executor := csource.BuildExecutor(t, target, "../../")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
for i, test := range tests {
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Parallel()
- testCover1(t, target, executor, test)
+ testCover1(t, target, test, source)
})
}
}
-func testCover1(t *testing.T, target *prog.Target, executor string, test CoverTest) {
+func testCover1(t *testing.T, target *prog.Target, test CoverTest, source *queue.PlainQueue) {
text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
p, err := target.Deserialize([]byte(text), prog.Strict)
if err != nil {
t.Fatal(err)
}
req := &queue.Request{
- Prog: p,
- Repeat: 1,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
- EnvFlags: flatrpc.ExecEnvSignal,
+ EnvFlags: flatrpc.ExecEnvSignal | flatrpc.ExecEnvSandboxNone,
ExecFlags: test.Flags,
},
}
- res := runTest(req, executor)
+ if test.Flags&flatrpc.ExecFlagCollectSignal != 0 {
+ req.ReturnAllSignal = []int{0}
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
if res.Err != nil || res.Info == nil || len(res.Info.Calls) != 1 || res.Info.Calls[0] == nil {
- t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ t.Fatalf("program execution failed: status=%v err=%v\n%s", res.Status, res.Err, res.Output)
}
call := res.Info.Calls[0]
var comps [][2]uint64
for _, cmp := range call.Comps {
comps = append(comps, [2]uint64{cmp.Op1, cmp.Op2})
}
+ if test.Cover == nil {
+ test.Cover = []uint64{}
+ }
+ if test.Signal == nil {
+ test.Signal = []uint64{}
+ }
assert.Equal(t, test.Cover, call.Cover)
assert.Equal(t, test.Signal, call.Signal)
// Comparisons are reordered and order does not matter, so compare without order.
@@ -361,72 +352,38 @@ func makeComps(comps ...Comparison) []byte {
return w.Bytes()
}
-func runTest(req *queue.Request, executor string) *queue.Result {
- cfg := new(ipc.Config)
- sysTarget := targets.Get(req.Prog.Target.OS, req.Prog.Target.Arch)
- cfg.UseForkServer = sysTarget.ExecutorUsesForkServer
- cfg.Timeouts = sysTarget.Timeouts(1)
- cfg.Executor = executor
- env, err := ipc.MakeEnv(cfg, 0)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create ipc env: %w", err),
- }
+func startRpcserver(t *testing.T, target *prog.Target, executor string, source queue.Source) {
+ ctx, done := context.WithCancel(context.Background())
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: target,
+ Debug: *flagDebug,
+ Features: flatrpc.FeatureSandboxNone,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ },
+ Procs: runtime.GOMAXPROCS(0),
+ Slowdown: 10, // to deflake slower tests
+ },
+ Executor: executor,
+ Dir: t.TempDir(),
+ Context: ctx,
+ GDB: *flagGDB,
}
- defer env.Close()
- ret := &queue.Result{Status: queue.Success}
- for run := 0; run < req.Repeat; run++ {
- if run%2 == 0 {
- // Recreate Env every few iterations, this allows to cover more paths.
- env.ForceRestart()
- }
- output, info, hanged, err := env.Exec(&req.ExecOpts, req.Prog)
- ret.Output = append(ret.Output, output...)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: failed to run: %w", run, err),
- }
- }
- if hanged {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: hanged", run),
- }
- }
- if run == 0 {
- ret.Info = info
- } else {
- ret.Info.Calls = append(ret.Info.Calls, info.Calls...)
- }
+ cfg.MachineChecked = func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ cfg.Cover = true
+ return source
}
- return ret
-}
-
-func runTestC(req *queue.Request) *queue.Result {
- tmpDir, err := os.MkdirTemp("", "syz-runtest")
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create temp dir: %w", err),
+ errc := make(chan error)
+ go func() {
+ errc <- rpcserver.RunLocal(cfg)
+ }()
+ t.Cleanup(func() {
+ done()
+ if err := <-errc; err != nil {
+ t.Fatal(err)
}
- }
- defer os.RemoveAll(tmpDir)
- cmd := osutil.Command(req.BinaryFile)
- cmd.Dir = tmpDir
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- res := &queue.Result{}
- res.Output, res.Err = osutil.Run(20*time.Second, cmd)
- var verr *osutil.VerboseError
- if errors.As(res.Err, &verr) {
- // The process can legitimately do something like exit_group(1).
- // So we ignore the error and rely on the rest of the checks (e.g. syscall return values).
- res.Err = nil
- res.Output = verr.Output
- }
- return res
+ })
}
func TestParsing(t *testing.T) {
diff --git a/pkg/vminfo/features.go b/pkg/vminfo/features.go
index 150b658fc..67969a37f 100644
--- a/pkg/vminfo/features.go
+++ b/pkg/vminfo/features.go
@@ -49,6 +49,10 @@ func (ctx *checkContext) startFeaturesCheck() {
testProg := ctx.target.DataMmapProg()
for feat := range flatrpc.EnumNamesFeature {
feat := feat
+ if ctx.cfg.Features&feat == 0 {
+ ctx.features <- featureResult{feat, "disabled by user"}
+ continue
+ }
go func() {
envFlags, execFlags := ctx.featureToFlags(feat)
req := &queue.Request{
@@ -106,7 +110,7 @@ func (ctx *checkContext) finishFeatures(featureInfos []*flatrpc.FeatureInfo) (Fe
feat.Reason = strings.TrimSpace(outputReplacer.Replace(feat.Reason))
features[res.id] = feat
}
- if feat := features[flatrpc.FeatureSandboxSetuid]; !feat.Enabled {
+ if feat := features[flatrpc.FeatureSandboxNone]; !feat.Enabled {
return features, fmt.Errorf("execution of simple program fails: %v", feat.Reason)
}
if feat := features[flatrpc.FeatureCoverage]; ctx.cfg.Cover && !feat.Enabled {
@@ -118,7 +122,7 @@ func (ctx *checkContext) finishFeatures(featureInfos []*flatrpc.FeatureInfo) (Fe
// featureToFlags creates ipc flags required to test the feature on a simple program.
// For features that has setup procedure in the executor, we just execute with the default flags.
func (ctx *checkContext) featureToFlags(feat flatrpc.Feature) (flatrpc.ExecEnv, flatrpc.ExecFlag) {
- envFlags := ctx.sandbox
+ envFlags := ctx.cfg.Sandbox
// These don't have a corresponding feature and are always enabled.
envFlags |= flatrpc.ExecEnvEnableCloseFds | flatrpc.ExecEnvEnableCgroups | flatrpc.ExecEnvEnableNetReset
execFlags := flatrpc.ExecFlagThreaded
@@ -135,12 +139,18 @@ func (ctx *checkContext) featureToFlags(feat flatrpc.Feature) (flatrpc.ExecEnv,
case flatrpc.FeatureDelayKcovMmap:
envFlags |= flatrpc.ExecEnvSignal | flatrpc.ExecEnvDelayKcovMmap
execFlags |= flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover
+ case flatrpc.FeatureSandboxNone:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxNone
case flatrpc.FeatureSandboxSetuid:
- // We use setuid sandbox feature to test that the simple program
- // succeeds under the actual sandbox (not necessary setuid).
- // We do this because we don't have a feature for sandbox 'none'.
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxSetuid
case flatrpc.FeatureSandboxNamespace:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxNamespace
case flatrpc.FeatureSandboxAndroid:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxAndroid
case flatrpc.FeatureFault:
case flatrpc.FeatureLeak:
case flatrpc.FeatureNetInjection:
diff --git a/pkg/vminfo/syscalls.go b/pkg/vminfo/syscalls.go
index 8a533227b..178e5d52c 100644
--- a/pkg/vminfo/syscalls.go
+++ b/pkg/vminfo/syscalls.go
@@ -11,8 +11,6 @@ import (
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -31,9 +29,8 @@ import (
type checkContext struct {
ctx context.Context
impl checker
- cfg *mgrconfig.Config
+ cfg *Config
target *prog.Target
- sandbox flatrpc.ExecEnv
executor queue.Executor
fs filesystem
// Once checking of a syscall is finished, the result is sent to syscalls.
@@ -48,18 +45,12 @@ type syscallResult struct {
reason string
}
-func newCheckContext(ctx context.Context, cfg *mgrconfig.Config, impl checker,
- executor queue.Executor) *checkContext {
- sandbox, err := ipc.SandboxToFlags(cfg.Sandbox)
- if err != nil {
- panic(fmt.Sprintf("failed to parse sandbox: %v", err))
- }
+func newCheckContext(ctx context.Context, cfg *Config, impl checker, executor queue.Executor) *checkContext {
return &checkContext{
ctx: ctx,
impl: impl,
cfg: cfg,
target: cfg.Target,
- sandbox: sandbox,
executor: executor,
syscalls: make(chan syscallResult),
features: make(chan featureResult, 100),
@@ -67,6 +58,7 @@ func newCheckContext(ctx context.Context, cfg *mgrconfig.Config, impl checker,
}
func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
+ sysTarget := targets.Get(ctx.cfg.Target.OS, ctx.cfg.Target.Arch)
ctx.fs = createVirtualFilesystem(fileInfos)
for _, id := range ctx.cfg.Syscalls {
call := ctx.target.Syscalls[id]
@@ -82,12 +74,12 @@ func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
}
// HostFuzzer targets can't run Go binaries on the targets,
// so we actually run on the host on another OS. The same for targets.TestOS OS.
- if ctx.cfg.SysTarget.HostFuzzer || ctx.target.OS == targets.TestOS {
+ if sysTarget.HostFuzzer || ctx.target.OS == targets.TestOS {
syscallCheck = alwaysSupported
}
go func() {
var reason string
- deps := ctx.cfg.SysTarget.PseudoSyscallDeps[call.CallName]
+ deps := sysTarget.PseudoSyscallDeps[call.CallName]
if len(deps) != 0 {
reason = ctx.supportedSyscalls(deps)
}
@@ -215,14 +207,14 @@ func (ctx *checkContext) anyCallSucceeds(calls []string, msg string) string {
}
func (ctx *checkContext) onlySandboxNone() string {
- if ctx.sandbox != 0 {
+ if ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNone {
return "only supported under root with sandbox=none"
}
return ""
}
func (ctx *checkContext) onlySandboxNoneOrNamespace() string {
- if ctx.sandbox != 0 && ctx.sandbox != flatrpc.ExecEnvSandboxNamespace {
+ if ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNone && ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNamespace {
return "only supported under root with sandbox=none/namespace"
}
return ""
@@ -237,9 +229,9 @@ func (ctx *checkContext) val(name string) uint64 {
}
func (ctx *checkContext) execRaw(calls []string, mode prog.DeserializeMode, root bool) *flatrpc.ProgInfo {
- sandbox := ctx.sandbox
+ sandbox := ctx.cfg.Sandbox
if root {
- sandbox = 0
+ sandbox = flatrpc.ExecEnvSandboxNone
}
info := &flatrpc.ProgInfo{}
for remain := calls; len(remain) != 0; {
@@ -265,13 +257,9 @@ func (ctx *checkContext) execRaw(calls []string, mode prog.DeserializeMode, root
res := req.Wait(ctx.ctx)
if res.Status == queue.Success {
info.Calls = append(info.Calls, res.Info.Calls...)
- } else if res.Status == queue.Crashed {
+ } else {
// Pretend these calls were not executed.
info.Calls = append(info.Calls, flatrpc.EmptyProgInfo(ncalls).Calls...)
- } else {
- // The program must have been either executed or not due to a crash.
- panic(fmt.Sprintf("got unexpected execution status (%d) for the prog %s",
- res.Status, progStr))
}
}
if len(info.Calls) != len(calls) {
diff --git a/pkg/vminfo/vminfo.go b/pkg/vminfo/vminfo.go
index a880f2f5b..b65baaac8 100644
--- a/pkg/vminfo/vminfo.go
+++ b/pkg/vminfo/vminfo.go
@@ -25,7 +25,6 @@ import (
"github.com/google/syzkaller/pkg/cover"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -36,9 +35,21 @@ type Checker struct {
checkContext *checkContext
}
-func New(cfg *mgrconfig.Config) *Checker {
+type Config struct {
+ Target *prog.Target
+ // Set of features to check, missing features won't be checked/enabled after Run.
+ Features flatrpc.Feature
+ // Set of syscalls to check.
+ Syscalls []int
+ Debug bool
+ Cover bool
+ Sandbox flatrpc.ExecEnv
+ SandboxArg int64
+}
+
+func New(cfg *Config) *Checker {
var impl checker
- switch cfg.TargetOS {
+ switch cfg.Target.OS {
case targets.Linux:
impl = new(linux)
case targets.NetBSD:
diff --git a/pkg/vminfo/vminfo_test.go b/pkg/vminfo/vminfo_test.go
index f58e3f7e5..2be23ca66 100644
--- a/pkg/vminfo/vminfo_test.go
+++ b/pkg/vminfo/vminfo_test.go
@@ -4,6 +4,8 @@
package vminfo
import (
+ "os"
+ "path/filepath"
"runtime"
"strings"
"testing"
@@ -11,9 +13,6 @@ import (
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/host"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -121,29 +120,62 @@ func createSuccessfulResults(source queue.Source, stop chan struct{}) {
func hostChecker(t *testing.T) (*Checker, []*flatrpc.FileInfo) {
cfg := testConfig(t, runtime.GOOS, runtime.GOARCH)
checker := New(cfg)
- files := host.ReadFiles(checker.RequiredFiles())
+ files := readFiles(checker.RequiredFiles())
return checker, files
}
-func testConfig(t *testing.T, OS, arch string) *mgrconfig.Config {
+func testConfig(t *testing.T, OS, arch string) *Config {
target, err := prog.GetTarget(OS, arch)
if err != nil {
t.Fatal(err)
}
- cfg := &mgrconfig.Config{
- Sandbox: ipc.FlagsToSandbox(0),
- Derived: mgrconfig.Derived{
- TargetOS: OS,
- TargetArch: arch,
- TargetVMArch: arch,
- Target: target,
- SysTarget: targets.Get(OS, arch),
- },
- }
+ var syscalls []int
for id := range target.Syscalls {
if !target.Syscalls[id].Attrs.Disabled {
- cfg.Syscalls = append(cfg.Syscalls, id)
+ syscalls = append(syscalls, id)
+ }
+ }
+ return &Config{
+ Target: target,
+ Features: flatrpc.AllFeatures,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ Syscalls: syscalls,
+ }
+}
+
+func readFiles(files []string) []*flatrpc.FileInfo {
+ var res []*flatrpc.FileInfo
+ for _, glob := range files {
+ glob = filepath.FromSlash(glob)
+ if !strings.Contains(glob, "*") {
+ res = append(res, readFile(glob))
+ continue
+ }
+ matches, err := filepath.Glob(glob)
+ if err != nil {
+ res = append(res, &flatrpc.FileInfo{
+ Name: glob,
+ Error: err.Error(),
+ })
+ continue
}
+ for _, file := range matches {
+ res = append(res, readFile(file))
+ }
+ }
+ return res
+}
+
+func readFile(file string) *flatrpc.FileInfo {
+ data, err := os.ReadFile(file)
+ exists, errStr := true, ""
+ if err != nil {
+ exists, errStr = !os.IsNotExist(err), err.Error()
+ }
+ return &flatrpc.FileInfo{
+ Name: file,
+ Exists: exists,
+ Error: errStr,
+ Data: data,
}
- return cfg
}
diff --git a/sys/linux/init.go b/sys/linux/init.go
index faf4a98af..55953eee0 100644
--- a/sys/linux/init.go
+++ b/sys/linux/init.go
@@ -4,8 +4,6 @@
package linux
import (
- "runtime"
-
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -109,27 +107,8 @@ func InitTarget(target *prog.Target) {
int(target.GetConst("XENSTORE_REL_PATH_MAX")),
1 << 16, // gVisor's MaxFilenameLen
}
-
- if target.Arch == runtime.GOARCH {
- KCOV_INIT_TRACE = uintptr(target.GetConst("KCOV_INIT_TRACE"))
- KCOV_ENABLE = uintptr(target.GetConst("KCOV_ENABLE"))
- KCOV_REMOTE_ENABLE = uintptr(target.GetConst("KCOV_REMOTE_ENABLE"))
- KCOV_DISABLE = uintptr(target.GetConst("KCOV_DISABLE"))
- KCOV_TRACE_PC = uintptr(target.GetConst("KCOV_TRACE_PC"))
- KCOV_TRACE_CMP = uintptr(target.GetConst("KCOV_TRACE_CMP"))
- }
}
-var (
- // This should not be here, but for now we expose this for syz-fuzzer.
- KCOV_INIT_TRACE uintptr
- KCOV_ENABLE uintptr
- KCOV_REMOTE_ENABLE uintptr
- KCOV_DISABLE uintptr
- KCOV_TRACE_PC uintptr
- KCOV_TRACE_CMP uintptr
-)
-
type arch struct {
unix *targets.UnixNeutralizer
diff --git a/sys/test/exec.txt b/sys/test/exec.txt
index 4b43b57b0..ffb56610b 100644
--- a/sys/test/exec.txt
+++ b/sys/test/exec.txt
@@ -9,7 +9,7 @@ syz_compare(want ptr[in, string], want_len bytesize[want], got ptr[in, compare_d
syz_compare_int$2(n const[2], v0 intptr, v1 intptr)
syz_compare_int$3(n const[3], v0 intptr, v1 intptr, v2 intptr)
syz_compare_int$4(n const[4], v0 intptr, v1 intptr, v2 intptr, v3 intptr)
-syz_compare_zlib(data ptr[in, array[int8]], size bytesize[data], zdata ptr[in, compressed_image], zsize bytesize[zdata]) (no_generate, no_minimize)
+syz_compare_zlib(data ptr[in, array[int8]], size bytesize[data], zdata ptr[in, compressed_image], zsize bytesize[zdata]) (timeout[4000], no_generate, no_minimize)
# Copies the data into KCOV buffer verbatim and sets assumed kernel bitness.
syz_inject_cover(is64 bool8, ptr ptr[in, array[int8]], size bytesize[ptr])
diff --git a/syz-ci/updater.go b/syz-ci/updater.go
index 1d45975a7..0eb901825 100644
--- a/syz-ci/updater.go
+++ b/syz-ci/updater.go
@@ -81,7 +81,6 @@ func NewSyzUpdater(cfg *Config) *SyzUpdater {
mgrcfg := mgr.managercfg
os, vmarch, arch := mgrcfg.TargetOS, mgrcfg.TargetVMArch, mgrcfg.TargetArch
targets[os+"/"+vmarch+"/"+arch] = true
- syzFiles[fmt.Sprintf("bin/%v_%v/syz-fuzzer", os, vmarch)] = true
syzFiles[fmt.Sprintf("bin/%v_%v/syz-execprog", os, vmarch)] = true
if mgrcfg.SysTarget.ExecutorBin == "" {
syzFiles[fmt.Sprintf("bin/%v_%v/syz-executor", os, arch)] = true
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
deleted file mode 100644
index 93259cc03..000000000
--- a/syz-fuzzer/fuzzer.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "net/http"
- _ "net/http/pprof"
- "os"
- "path/filepath"
- "runtime"
- "runtime/debug"
- "slices"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/host"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
- "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/signal"
- "github.com/google/syzkaller/pkg/tool"
- "github.com/google/syzkaller/prog"
- _ "github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/sys/targets"
-)
-
-type FuzzerTool struct {
- conn *flatrpc.Conn
- executor string
- checkLeaks atomic.Int32
- timeouts targets.Timeouts
- leakFrames []string
-
- requests chan *flatrpc.ExecRequest
- signalMu sync.RWMutex
- maxSignal signal.Signal
-}
-
-// TODO: split into smaller methods.
-// nolint: funlen, gocyclo
-func main() {
- debug.SetGCPercent(50)
-
- var (
- flagName = flag.String("name", "test", "unique name for manager")
- flagOS = flag.String("os", runtime.GOOS, "target OS")
- flagArch = flag.String("arch", runtime.GOARCH, "target arch")
- flagManager = flag.String("manager", "", "manager rpc address")
- flagPprofPort = flag.Int("pprof_port", 0, "HTTP port for the pprof endpoint (disabled if 0)")
- )
- defer tool.Init()()
- log.Logf(0, "fuzzer started")
-
- target, err := prog.GetTarget(*flagOS, *flagArch)
- if err != nil {
- log.SyzFatal(err)
- }
-
- config, _, err := ipcconfig.Default(target)
- if err != nil {
- log.SyzFatalf("failed to create default ipc config: %v", err)
- }
- timeouts := config.Timeouts
- executor := config.Executor
- shutdown := make(chan struct{})
- osutil.HandleInterrupts(shutdown)
- go func() {
- // Handles graceful preemption on GCE.
- <-shutdown
- log.Logf(0, "SYZ-FUZZER: PREEMPTED")
- os.Exit(1)
- }()
-
- if *flagPprofPort != 0 {
- setupPprofHandler(*flagPprofPort)
- }
-
- executorArch, executorSyzRevision, executorGitRevision, err := executorVersion(executor)
- if err != nil {
- log.SyzFatalf("failed to run executor version: %v ", err)
- }
-
- log.Logf(0, "dialing manager at %v", *flagManager)
- conn, err := flatrpc.Dial(*flagManager, timeouts.Scale)
- if err != nil {
- log.SyzFatalf("failed to connect to host: %v ", err)
- }
-
- log.Logf(1, "connecting to manager...")
- connectReq := &flatrpc.ConnectRequest{
- Name: *flagName,
- Arch: executorArch,
- GitRevision: executorGitRevision,
- SyzRevision: executorSyzRevision,
- }
- if err := flatrpc.Send(conn, connectReq); err != nil {
- log.SyzFatal(err)
- }
- connectReplyRaw, err := flatrpc.Recv[flatrpc.ConnectReplyRaw](conn)
- if err != nil {
- log.SyzFatal(err)
- }
- connectReply := connectReplyRaw.UnPack()
-
- infoReq := &flatrpc.InfoRequest{
- Files: host.ReadFiles(connectReply.Files),
- }
- features, err := host.SetupFeatures(target, executor, connectReply.Features, nil)
- if err != nil {
- infoReq.Error = fmt.Sprintf("failed to setup features: %v ", err)
- }
- infoReq.Features = features
- for _, glob := range connectReply.Globs {
- files, err := filepath.Glob(filepath.FromSlash(glob))
- if err != nil && infoReq.Error == "" {
- infoReq.Error = fmt.Sprintf("failed to read glob %q: %v", glob, err)
- }
- infoReq.Globs = append(infoReq.Globs, &flatrpc.GlobInfo{
- Name: glob,
- Files: files,
- })
- }
- if err := flatrpc.Send(conn, infoReq); err != nil {
- log.SyzFatal(err)
- }
- infoReplyRaw, err := flatrpc.Recv[flatrpc.InfoReplyRaw](conn)
- if err != nil {
- log.SyzFatal(err)
- }
- infoReply := infoReplyRaw.UnPack()
- config.CoverFilter = infoReply.CoverFilter
-
- fuzzerTool := &FuzzerTool{
- conn: conn,
- executor: executor,
- timeouts: timeouts,
- leakFrames: connectReply.LeakFrames,
-
- requests: make(chan *flatrpc.ExecRequest, connectReply.Procs*4),
- }
- fuzzerTool.filterDataRaceFrames(connectReply.RaceFrames)
- // TODO: repair leak checking.
- _ = fuzzerTool.leakGateCallback
-
- log.Logf(0, "starting %v executor processes", connectReply.Procs)
- for pid := 0; pid < int(connectReply.Procs); pid++ {
- startProc(fuzzerTool, pid, config)
- }
-
- fuzzerTool.handleConn()
-}
-
-func (tool *FuzzerTool) leakGateCallback() {
- // Leak checking is very slow so we don't do it while triaging the corpus
- // (otherwise it takes infinity). When we have presumably triaged the corpus
- // (checkLeaks == 1), we run leak checking bug ignore the result
- // to flush any previous leaks. After that (checkLeaks == 2)
- // we do actual leak checking and report leaks.
- checkLeaks := tool.checkLeaks.Load()
- if checkLeaks == 0 {
- return
- }
- args := append([]string{"leak"}, tool.leakFrames...)
- timeout := tool.timeouts.NoOutput * 9 / 10
- output, err := osutil.RunCmd(timeout, "", tool.executor, args...)
- if err != nil && checkLeaks == 2 {
- // If we exit right away, dying executors will dump lots of garbage to console.
- os.Stdout.Write(output)
- fmt.Printf("BUG: leak checking failed\n")
- time.Sleep(time.Hour)
- os.Exit(1)
- }
- if checkLeaks == 1 {
- tool.checkLeaks.Store(2)
- }
-}
-
-func (tool *FuzzerTool) filterDataRaceFrames(frames []string) {
- if len(frames) == 0 {
- return
- }
- args := append([]string{"setup_kcsan_filterlist"}, frames...)
- timeout := time.Minute * tool.timeouts.Scale
- output, err := osutil.RunCmd(timeout, "", tool.executor, args...)
- if err != nil {
- log.SyzFatalf("failed to set KCSAN filterlist: %v", err)
- }
- log.Logf(0, "%s", output)
-}
-
-func (tool *FuzzerTool) startExecutingCall(progID int64, pid, try int, wait time.Duration) {
- msg := &flatrpc.ExecutorMessage{
- Msg: &flatrpc.ExecutorMessages{
- Type: flatrpc.ExecutorMessagesRawExecuting,
- Value: &flatrpc.ExecutingMessage{
- Id: progID,
- ProcId: int32(pid),
- Try: int32(try),
- WaitDuration: int64(wait),
- },
- },
- }
- if err := flatrpc.Send(tool.conn, msg); err != nil {
- log.SyzFatal(err)
- }
-}
-
-func (tool *FuzzerTool) handleConn() {
- for {
- raw, err := flatrpc.Recv[flatrpc.HostMessageRaw](tool.conn)
- if err != nil {
- log.SyzFatal(err)
- }
- switch msg := raw.UnPack().Msg.Value.(type) {
- case *flatrpc.ExecRequest:
- msg.ProgData = slices.Clone(msg.ProgData)
- tool.requests <- msg
- case *flatrpc.SignalUpdate:
- tool.handleSignalUpdate(msg)
- case *flatrpc.StartLeakChecks:
- tool.checkLeaks.Store(1)
- }
- }
-}
-
-func (tool *FuzzerTool) diffMaxSignal(info *flatrpc.ProgInfo, mask signal.Signal, maskCall int, allSignal []int32) {
- tool.signalMu.RLock()
- defer tool.signalMu.RUnlock()
- diffMaxSignal(info, tool.maxSignal, mask, maskCall, allSignal)
-}
-
-func diffMaxSignal(info *flatrpc.ProgInfo, max, mask signal.Signal, maskCall int, allSignal []int32) {
- numCalls := int32(len(info.Calls))
- all := make([]bool, numCalls+1)
- for _, c := range allSignal {
- if c < 0 {
- c = numCalls
- }
- if c <= numCalls {
- all[c] = true
- }
- }
- if info.Extra != nil {
- info.Extra.Signal = diffCallSignal(info.Extra.Signal, max, mask, -1, maskCall, all[numCalls])
- }
- for i := 0; i < len(info.Calls); i++ {
- info.Calls[i].Signal = diffCallSignal(info.Calls[i].Signal, max, mask, i, maskCall, all[i])
- }
-}
-
-func diffCallSignal(raw []uint64, max, mask signal.Signal, call, maskCall int, all bool) []uint64 {
- if mask != nil && call == maskCall {
- return signal.FilterRaw(raw, max, mask)
- }
- // If there is any new signal, we return whole signal, since the fuzzer will need it for triage.
- if all || max.HasNew(raw) {
- return raw
- }
- return nil
-}
-
-func (tool *FuzzerTool) handleSignalUpdate(msg *flatrpc.SignalUpdate) {
- tool.signalMu.Lock()
- defer tool.signalMu.Unlock()
- tool.maxSignal.Subtract(signal.FromRaw(msg.DropMax, 0))
- tool.maxSignal.Merge(signal.FromRaw(msg.NewMax, 0))
-}
-
-func setupPprofHandler(port int) {
- // Necessary for pprof handlers.
- go func() {
- err := http.ListenAndServe(fmt.Sprintf("0.0.0.0:%v", port), nil)
- if err != nil {
- log.SyzFatalf("failed to setup a server: %v", err)
- }
- }()
-}
-
-func executorVersion(bin string) (string, string, string, error) {
- args := strings.Split(bin, " ")
- args = append(args, "version")
- cmd := osutil.Command(args[0], args[1:]...)
- cmd.Stderr = io.Discard
- if _, err := cmd.StdinPipe(); err != nil { // for the case executor is wrapped with ssh
- return "", "", "", err
- }
- out, err := osutil.Run(time.Minute, cmd)
- if err != nil {
- return "", "", "", fmt.Errorf("failed to run executor version: %w", err)
- }
- // Executor returns OS, arch, descriptions hash, git revision.
- vers := strings.Split(strings.TrimSpace(string(out)), " ")
- if len(vers) != 4 {
- return "", "", "", fmt.Errorf("executor version returned bad result: %q", string(out))
- }
- return vers[1], vers[2], vers[3], nil
-}
diff --git a/syz-fuzzer/fuzzer_test.go b/syz-fuzzer/fuzzer_test.go
deleted file mode 100644
index 81ffdc818..000000000
--- a/syz-fuzzer/fuzzer_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2024 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package main
-
-import (
- "testing"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/signal"
- "github.com/stretchr/testify/assert"
-)
-
-func TestFilterProgInfo(t *testing.T) {
- max := signal.FromRaw([]uint64{5, 6, 7}, 0)
- mask := signal.FromRaw([]uint64{2, 4, 6, 8}, 0)
- info := flatrpc.ProgInfo{
- Calls: []*flatrpc.CallInfo{
- {
- Signal: []uint64{1, 2, 3, 5, 6},
- Cover: []uint64{1, 2, 3},
- },
- {
- Signal: []uint64{2, 3, 4, 6, 7},
- Cover: []uint64{2, 3, 4},
- },
- {
- Signal: []uint64{1, 2, 3, 4, 5, 6, 7, 8},
- },
- },
- Extra: &flatrpc.CallInfo{
- Signal: []uint64{3, 4, 5},
- Cover: []uint64{3, 4, 5},
- },
- }
- diffMaxSignal(&info, max, mask, 1, []int32{2})
- assert.Equal(t, flatrpc.ProgInfo{
- Calls: []*flatrpc.CallInfo{
- {
- Signal: []uint64{1, 2, 3, 5, 6},
- Cover: []uint64{1, 2, 3},
- },
- {
- Signal: []uint64{2, 3, 4, 6},
- Cover: []uint64{2, 3, 4},
- },
- {
- Signal: []uint64{1, 2, 3, 4, 5, 6, 7, 8},
- },
- },
- Extra: &flatrpc.CallInfo{
- Signal: []uint64{3, 4, 5},
- Cover: []uint64{3, 4, 5},
- },
- }, info)
-}
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
deleted file mode 100644
index 85555a5a7..000000000
--- a/syz-fuzzer/proc.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2017 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package main
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "time"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/signal"
-)
-
-// Proc represents a single fuzzing process (executor).
-type Proc struct {
- tool *FuzzerTool
- pid int
- env *ipc.Env
-}
-
-func startProc(tool *FuzzerTool, pid int, config *ipc.Config) {
- env, err := ipc.MakeEnv(config, pid)
- if err != nil {
- log.SyzFatalf("failed to create env: %v", err)
- }
- proc := &Proc{
- tool: tool,
- pid: pid,
- env: env,
- }
- go proc.loop()
-}
-
-func (proc *Proc) loop() {
- for {
- req, wait := proc.nextRequest()
- if req.Flags&flatrpc.RequestFlagResetState != 0 {
- proc.env.ForceRestart()
- }
- info, output, err := proc.execute(req, wait)
- res := &flatrpc.ExecResult{
- Id: req.Id,
- Info: info,
- Output: output,
- Error: err,
- }
- for i := 1; i < int(req.Repeat) && res.Error == "" && req.Flags&flatrpc.RequestFlagIsBinary == 0; i++ {
- // Recreate Env every few iterations, this allows to cover more paths.
- if i%2 == 0 {
- proc.env.ForceRestart()
- }
- info, output, err := proc.execute(req, 0)
- if res.Info == nil {
- res.Info = info
- } else if info != nil {
- res.Info.Calls = append(res.Info.Calls, info.Calls...)
- }
- res.Output = append(res.Output, output...)
- res.Error = err
- }
- if res.Info != nil {
- filter := signal.FromRaw(req.SignalFilter, 0)
- proc.tool.diffMaxSignal(res.Info, filter, int(req.SignalFilterCall), req.AllSignal)
- }
- msg := &flatrpc.ExecutorMessage{
- Msg: &flatrpc.ExecutorMessages{
- Type: flatrpc.ExecutorMessagesRawExecResult,
- Value: res,
- },
- }
- if err := flatrpc.Send(proc.tool.conn, msg); err != nil {
- log.SyzFatal(err)
- }
- }
-}
-
-func (proc *Proc) nextRequest() (*flatrpc.ExecRequest, time.Duration) {
- select {
- case req := <-proc.tool.requests:
- return req, 0
- default:
- }
- // Not having enough inputs to execute is a sign of RPC communication problems.
- // Let's count and report such situations.
- start := osutil.MonotonicNano()
- req := <-proc.tool.requests
- wait := osutil.MonotonicNano() - start
- return req, wait
-}
-
-func (proc *Proc) execute(req *flatrpc.ExecRequest, wait time.Duration) (
- info *flatrpc.ProgInfo, output []byte, errStr string) {
- var err error
- if req.Flags&flatrpc.RequestFlagIsBinary != 0 {
- output, err = executeBinary(req)
- } else {
- info, output, err = proc.executeProgram(req, wait)
- }
- if req.Flags&flatrpc.RequestFlagReturnOutput == 0 {
- output = nil
- }
- if err != nil {
- errStr = err.Error()
- }
- return
-}
-
-func (proc *Proc) executeProgram(req *flatrpc.ExecRequest, wait time.Duration) (*flatrpc.ProgInfo, []byte, error) {
- returnError := req.Flags&flatrpc.RequestFlagReturnError != 0
- for try := 0; ; try++ {
- var output []byte
- var info *flatrpc.ProgInfo
- var hanged bool
- err := proc.env.RestartIfNeeded(req.ExecOpts)
- if err == nil {
- proc.tool.startExecutingCall(req.Id, proc.pid, try, wait)
- output, info, hanged, err = proc.env.ExecProg(req.ExecOpts, req.ProgData)
- // Don't print output if returning error b/c it may contain SYZFAIL.
- if !returnError {
- log.Logf(2, "result hanged=%v err=%v: %s", hanged, err, output)
- }
- if hanged && err == nil && returnError {
- err = errors.New("hanged")
- }
- }
- if err == nil || returnError {
- return info, output, err
- }
- log.Logf(4, "fuzzer detected executor failure='%v', retrying #%d", err, try+1)
- if try > 10 {
- log.SyzFatalf("executor %v failed %v times: %v\n%s", proc.pid, try, err, output)
- } else if try > 3 {
- time.Sleep(100 * time.Millisecond)
- }
- }
-}
-
-func executeBinary(req *flatrpc.ExecRequest) ([]byte, error) {
- tmp, err := os.MkdirTemp("", "syz-runtest")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp dir: %w", err)
- }
- defer os.RemoveAll(tmp)
- bin := filepath.Join(tmp, "syz-executor")
- if err := os.WriteFile(bin, req.ProgData, 0777); err != nil {
- return nil, fmt.Errorf("failed to write binary: %w", err)
- }
- cmd := osutil.Command(bin)
- cmd.Dir = tmp
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- output, err := osutil.Run(20*time.Second, cmd)
- var verr *osutil.VerboseError
- if errors.As(err, &verr) {
- // The process can legitimately do something like exit_group(1).
- // So we ignore the error and rely on the rest of the checks (e.g. syscall return values).
- return verr.Output, nil
- }
- return output, err
-}
diff --git a/syz-manager/covfilter.go b/syz-manager/covfilter.go
index 8b7d54cd0..88ecd6bac 100644
--- a/syz-manager/covfilter.go
+++ b/syz-manager/covfilter.go
@@ -17,6 +17,16 @@ import (
"github.com/google/syzkaller/pkg/mgrconfig"
)
+func (mgr *Manager) CoverageFilter(modules []*cover.KernelModule) []uint64 {
+ execFilter, filter, err := createCoverageFilter(mgr.cfg, modules)
+ if err != nil {
+ log.Fatalf("failed to init coverage filter: %v", err)
+ }
+ mgr.modules = modules
+ mgr.coverFilter = filter
+ return execFilter
+}
+
func createCoverageFilter(cfg *mgrconfig.Config, modules []*cover.KernelModule) ([]uint64, map[uint64]struct{}, error) {
if !cfg.HasCovFilter() {
return nil, nil, nil
diff --git a/syz-manager/http.go b/syz-manager/http.go
index ea57ba3be..fc11e26c1 100644
--- a/syz-manager/http.go
+++ b/syz-manager/http.go
@@ -24,6 +24,7 @@ import (
"github.com/google/syzkaller/pkg/html/pages"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/stats"
"github.com/google/syzkaller/pkg/vcs"
"github.com/google/syzkaller/prog"
@@ -40,6 +41,8 @@ func (mgr *Manager) initHTTP() {
handle("/config", mgr.httpConfig)
handle("/expert_mode", mgr.httpExpertMode)
handle("/stats", mgr.httpStats)
+ handle("/vms", mgr.httpVMs)
+ handle("/vm", mgr.httpVM)
handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}).ServeHTTP)
handle("/syscalls", mgr.httpSyscalls)
handle("/corpus", mgr.httpCorpus)
@@ -163,6 +166,51 @@ func (mgr *Manager) httpStats(w http.ResponseWriter, r *http.Request) {
w.Write(data)
}
+func (mgr *Manager) httpVMs(w http.ResponseWriter, r *http.Request) {
+ data := &UIVMData{
+ Name: mgr.cfg.Name,
+ }
+ // TODO: we could also query vmLoop for VMs that are idle (waiting to start reproducing),
+ // and query the exact bug that is being reproduced by a VM.
+ for name, state := range mgr.serv.VMState() {
+ info := UIVMInfo{
+ Name: name,
+ State: "unknown",
+ Since: time.Since(state.Timestamp),
+ }
+ switch state.State {
+ case rpcserver.StateOffline:
+ info.State = "reproducing"
+ case rpcserver.StateBooting:
+ info.State = "booting"
+ case rpcserver.StateFuzzing:
+ info.State = "fuzzing"
+ info.MachineInfo = fmt.Sprintf("/vm?type=machine-info&name=%v", name)
+ info.RunnerStatus = fmt.Sprintf("/vm?type=runner-status&name=%v", name)
+ case rpcserver.StateStopping:
+ info.State = "crashed"
+ }
+ data.VMs = append(data.VMs, info)
+ }
+ sort.Slice(data.VMs, func(i, j int) bool {
+ return data.VMs[i].Name < data.VMs[j].Name
+ })
+ executeTemplate(w, vmsTemplate, data)
+}
+
+func (mgr *Manager) httpVM(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", ctTextPlain)
+ vm := r.FormValue("name")
+ switch r.FormValue("type") {
+ case "machine-info":
+ w.Write(mgr.serv.MachineInfo(vm))
+ case "runner-status":
+ w.Write(mgr.serv.RunnerStatus(vm))
+ default:
+ w.Write([]byte("unknown info type"))
+ }
+}
+
func (mgr *Manager) httpCrash(w http.ResponseWriter, r *http.Request) {
crashID := r.FormValue("id")
crash := readCrash(mgr.cfg.Workdir, crashID, nil, mgr.firstConnect.Load(), true)
@@ -268,12 +316,12 @@ func (mgr *Manager) httpCoverCover(w http.ResponseWriter, r *http.Request, funcF
// Don't hold the mutex while creating report generator and generating the report,
// these operations take lots of time.
- if !mgr.serv.checkDone.Load() {
+ if !mgr.checkDone.Load() {
http.Error(w, "coverage is not ready, please try again later after fuzzer started", http.StatusInternalServerError)
return
}
- rg, err := getReportGenerator(mgr.cfg, mgr.serv.modules)
+ rg, err := getReportGenerator(mgr.cfg, mgr.modules)
if err != nil {
http.Error(w, fmt.Sprintf("failed to generate coverage profile: %v", err), http.StatusInternalServerError)
return
@@ -329,11 +377,11 @@ func (mgr *Manager) httpCoverCover(w http.ResponseWriter, r *http.Request, funcF
var coverFilter map[uint64]struct{}
if r.FormValue("filter") != "" || funcFlag == DoFilterPCs {
- if mgr.serv.coverFilter == nil {
+ if mgr.coverFilter == nil {
http.Error(w, "cover is not filtered in config", http.StatusInternalServerError)
return
}
- coverFilter = mgr.serv.coverFilter
+ coverFilter = mgr.coverFilter
}
params := cover.HandlerParams{
@@ -503,12 +551,7 @@ func (mgr *Manager) httpDebugInput(w http.ResponseWriter, r *http.Request) {
}
func (mgr *Manager) modulesInfo(w http.ResponseWriter, r *http.Request) {
- if mgr.serv.canonicalModules == nil {
- fmt.Fprintf(w, "module information not retrieved yet, please retry after fuzzing starts\n")
- return
- }
- // NewCanonicalizer() is initialized with serv.modules.
- modules, err := json.MarshalIndent(mgr.serv.modules, "", "\t")
+ modules, err := json.MarshalIndent(mgr.modules, "", "\t")
if err != nil {
fmt.Fprintf(w, "unable to create JSON modules info: %v", err)
return
@@ -728,6 +771,19 @@ type UISummaryData struct {
Log string
}
+type UIVMData struct {
+ Name string
+ VMs []UIVMInfo
+}
+
+type UIVMInfo struct {
+ Name string
+ State string
+ Since time.Duration
+ MachineInfo string
+ RunnerStatus string
+}
+
type UISyscallsData struct {
Name string
Calls []UICallType
@@ -846,6 +902,37 @@ var summaryTemplate = pages.Create(`
</body></html>
`)
+var vmsTemplate = pages.Create(`
+<!doctype html>
+<html>
+<head>
+ <title>{{.Name }} syzkaller</title>
+ {{HEAD}}
+</head>
+<body>
+
+<table class="list_table">
+ <caption>VM Info:</caption>
+ <tr>
+ <th><a onclick="return sortTable(this, 'Name', textSort)" href="#">Name</a></th>
+ <th><a onclick="return sortTable(this, 'State', textSort)" href="#">State</a></th>
+ <th><a onclick="return sortTable(this, 'Since', timeSort)" href="#">Since</a></th>
+ <th><a onclick="return sortTable(this, 'Machine Info', timeSort)" href="#">Machine Info</a></th>
+ <th><a onclick="return sortTable(this, 'Runner Status', timeSort)" href="#">Runner Status</a></th>
+ </tr>
+ {{range $vm := $.VMs}}
+ <tr>
+ <td>{{$vm.Name}}</td>
+ <td>{{$vm.State}}</td>
+ <td>{{formatDuration $vm.Since}}</td>
+ <td>{{optlink $vm.MachineInfo "info"}}</td>
+ <td>{{optlink $vm.RunnerStatus "status"}}</td>
+ </tr>
+ {{end}}
+</table>
+</body></html>
+`)
+
var syscallsTemplate = pages.Create(`
<!doctype html>
<html>
diff --git a/syz-manager/hub.go b/syz-manager/hub.go
index 5ed0570c3..7d798fdd2 100644
--- a/syz-manager/hub.go
+++ b/syz-manager/hub.go
@@ -4,6 +4,7 @@
package main
import (
+ "fmt"
"net/http"
"strings"
"time"
@@ -237,10 +238,9 @@ func (hc *HubConnector) sync(hub *rpctype.RPCClient, corpus [][]byte) error {
func (hc *HubConnector) processProgs(inputs []rpctype.HubInput) (minimized, smashed, dropped int) {
candidates := make([]fuzzer.Candidate, 0, len(inputs))
for _, inp := range inputs {
- p, disabled, bad := parseProgram(hc.target, hc.enabledCalls, inp.Prog)
- if bad != nil || disabled {
- log.Logf(0, "rejecting program from hub (bad=%v, disabled=%v):\n%s",
- bad, disabled, inp)
+ p, err := hc.parseProgram(inp.Prog)
+ if err != nil {
+ log.Logf(0, "rejecting program from hub: %v\n%s", err, inp.Prog)
dropped++
continue
}
@@ -292,10 +292,9 @@ func splitDomains(domain string) (string, string) {
func (hc *HubConnector) processRepros(repros [][]byte) int {
dropped := 0
for _, repro := range repros {
- _, disabled, bad := parseProgram(hc.target, hc.enabledCalls, repro)
- if bad != nil || disabled {
- log.Logf(0, "rejecting repro from hub (bad=%v, disabled=%v):\n%s",
- bad, disabled, repro)
+ _, err := hc.parseProgram(repro)
+ if err != nil {
+ log.Logf(0, "rejecting repro from hub: %v\n%s", err, repro)
dropped++
continue
}
@@ -317,3 +316,14 @@ func (hc *HubConnector) processRepros(repros [][]byte) int {
}
return dropped
}
+
+func (hc *HubConnector) parseProgram(data []byte) (*prog.Prog, error) {
+ p, err := loadProg(hc.target, data)
+ if err != nil {
+ return nil, err
+ }
+ if containsDisabled(p, hc.enabledCalls) {
+ return nil, fmt.Errorf("contains disabled calls")
+ }
+ return p, nil
+}
diff --git a/syz-manager/last_executing_test.go b/syz-manager/last_executing_test.go
deleted file mode 100644
index d373d9f58..000000000
--- a/syz-manager/last_executing_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2024 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package main
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestLastExecutingEmpty(t *testing.T) {
- last := MakeLastExecuting(10, 10)
- assert.Empty(t, last.Collect())
-}
-
-func TestLastExecuting(t *testing.T) {
- last := MakeLastExecuting(10, 3)
- last.Note(0, []byte("prog1"), 1)
-
- last.Note(1, []byte("prog2"), 2)
- last.Note(1, []byte("prog3"), 3)
-
- last.Note(3, []byte("prog4"), 4)
- last.Note(3, []byte("prog5"), 5)
- last.Note(3, []byte("prog6"), 6)
-
- last.Note(7, []byte("prog7"), 7)
- last.Note(7, []byte("prog8"), 8)
- last.Note(7, []byte("prog9"), 9)
- last.Note(7, []byte("prog10"), 10)
- last.Note(7, []byte("prog11"), 11)
-
- last.Note(9, []byte("prog12"), 12)
-
- last.Note(8, []byte("prog13"), 13)
-
- assert.Equal(t, last.Collect(), []ExecRecord{
- {Proc: 0, Prog: []byte("prog1"), Time: 12},
-
- {Proc: 1, Prog: []byte("prog2"), Time: 11},
- {Proc: 1, Prog: []byte("prog3"), Time: 10},
-
- {Proc: 3, Prog: []byte("prog4"), Time: 9},
- {Proc: 3, Prog: []byte("prog5"), Time: 8},
- {Proc: 3, Prog: []byte("prog6"), Time: 7},
-
- {Proc: 7, Prog: []byte("prog9"), Time: 4},
- {Proc: 7, Prog: []byte("prog10"), Time: 3},
- {Proc: 7, Prog: []byte("prog11"), Time: 2},
-
- {Proc: 9, Prog: []byte("prog12"), Time: 1},
-
- {Proc: 8, Prog: []byte("prog13"), Time: 0},
- })
-}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 2aef40475..d396bedb3 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -16,6 +16,8 @@ import (
"os"
"os/exec"
"path/filepath"
+ "runtime"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -23,6 +25,8 @@ import (
"github.com/google/syzkaller/dashboard/dashapi"
"github.com/google/syzkaller/pkg/asset"
"github.com/google/syzkaller/pkg/corpus"
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/cover/backend"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/db"
"github.com/google/syzkaller/pkg/flatrpc"
@@ -30,13 +34,14 @@ import (
"github.com/google/syzkaller/pkg/fuzzer/queue"
"github.com/google/syzkaller/pkg/gce"
"github.com/google/syzkaller/pkg/hash"
- "github.com/google/syzkaller/pkg/instance"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
crash_pkg "github.com/google/syzkaller/pkg/report/crash"
"github.com/google/syzkaller/pkg/repro"
+ "github.com/google/syzkaller/pkg/rpcserver"
+ "github.com/google/syzkaller/pkg/runtest"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/pkg/stats"
"github.com/google/syzkaller/prog"
@@ -58,7 +63,9 @@ var (
" If the kernel oopses during testing, the report is saved to workdir/report.json.\n"+
" - corpus-triage: triage corpus and exit\n"+
" This is useful mostly for benchmarking with testbed.\n"+
- " - corpus-run: continuously run the corpus programs.\n")
+ " - corpus-run: continuously run the corpus programs.\n"+
+ " - run-tests: run unit tests\n"+
+ " Run sys/os/test/* tests in various modes and print results.\n")
)
type Manager struct {
@@ -69,19 +76,20 @@ type Manager struct {
sysTarget *targets.Target
reporter *report.Reporter
crashdir string
- serv *RPCServer
+ serv *rpcserver.Server
corpus *corpus.Corpus
corpusDB *db.DB
corpusDBMu sync.Mutex // for concurrent operations on corpusDB
- corpusPreloaded chan bool
+ corpusPreload chan []fuzzer.Candidate
firstConnect atomic.Int64 // unix time, or 0 if not connected
crashTypes map[string]bool
vmStop chan bool
enabledFeatures flatrpc.Feature
- checkDone bool
+ checkDone atomic.Bool
fresh bool
expertMode bool
- nextInstanceID atomic.Uint64
+ modules []*cover.KernelModule
+ coverFilter map[uint64]struct{} // includes only coverage PCs
dash *dashapi.Dashboard
@@ -91,7 +99,6 @@ type Manager struct {
targetEnabledSyscalls map[*prog.Syscall]bool
disabledHashes map[string]struct{}
- seeds [][]byte
newRepros [][]byte
lastMinCorpus int
memoryLeakFrames map[string]bool
@@ -124,6 +131,7 @@ const (
ModeSmokeTest
ModeCorpusTriage
ModeCorpusRun
+ ModeRunTests
)
const (
@@ -184,6 +192,10 @@ func RunManager(cfg *mgrconfig.Config) {
mode = ModeCorpusRun
cfg.HubClient = ""
cfg.DashboardClient = ""
+ case "run-tests":
+ mode = ModeRunTests
+ cfg.DashboardClient = ""
+ cfg.HubClient = ""
default:
flag.PrintDefaults()
log.Fatalf("unknown mode: %v", *flagMode)
@@ -212,7 +224,7 @@ func RunManager(cfg *mgrconfig.Config) {
mode: mode,
vmPool: vmPool,
corpus: corpus.NewMonitoredCorpus(context.Background(), corpusUpdates),
- corpusPreloaded: make(chan bool),
+ corpusPreload: make(chan []fuzzer.Candidate),
target: cfg.Target,
sysTarget: cfg.SysTarget,
reporter: reporter,
@@ -231,16 +243,19 @@ func RunManager(cfg *mgrconfig.Config) {
}
mgr.initStats()
- go mgr.preloadCorpus()
+ if mode == ModeFuzzing || mode == ModeCorpusTriage {
+ go mgr.preloadCorpus()
+ }
mgr.initHTTP() // Creates HTTP server.
mgr.collectUsedFiles()
go mgr.corpusInputHandler(corpusUpdates)
// Create RPC server for fuzzers.
- mgr.serv, err = startRPCServer(mgr)
+ mgr.serv, err = rpcserver.New(mgr.cfg, mgr, *flagDebug)
if err != nil {
log.Fatalf("failed to create rpc server: %v", err)
}
+ log.Logf(0, "serving rpc on tcp://%v", mgr.serv.Port)
if cfg.DashboardAddr != "" {
mgr.dash, err = dashapi.New(cfg.DashboardClient, cfg.DashboardAddr, cfg.DashboardKey)
@@ -266,8 +281,8 @@ func RunManager(cfg *mgrconfig.Config) {
}
if mgr.vmPool == nil {
log.Logf(0, "no VMs started (type=none)")
- log.Logf(0, "you are supposed to start syz-fuzzer manually as:")
- log.Logf(0, "syz-fuzzer -manager=manager.ip:%v [other flags as necessary]", mgr.serv.port)
+ log.Logf(0, "you are supposed to start syz-executor manually as:")
+ log.Logf(0, "syz-executor runner local manager.ip %v", mgr.serv.Port)
<-vm.Shutdown
return
}
@@ -291,7 +306,7 @@ func (mgr *Manager) heartbeatLoop() {
if mgr.firstConnect.Load() == 0 {
continue
}
- mgr.statFuzzingTime.Add(diff * mgr.serv.statNumFuzzing.Val())
+ mgr.statFuzzingTime.Add(diff * mgr.serv.StatNumFuzzing.Val())
buf := new(bytes.Buffer)
for _, stat := range stats.Collect(stats.Console) {
fmt.Fprintf(buf, "%v=%v ", stat.Name, stat.Value)
@@ -630,122 +645,150 @@ func (mgr *Manager) preloadCorpus() {
log.Errorf("read %v inputs from corpus and got error: %v", len(corpusDB.Records), err)
}
mgr.corpusDB = corpusDB
-
- if seedDir := filepath.Join(mgr.cfg.Syzkaller, "sys", mgr.cfg.TargetOS, "test"); osutil.IsExist(seedDir) {
- seeds, err := os.ReadDir(seedDir)
- if err != nil {
- log.Fatalf("failed to read seeds dir: %v", err)
- }
- for _, seed := range seeds {
- data, err := os.ReadFile(filepath.Join(seedDir, seed.Name()))
- if err != nil {
- log.Fatalf("failed to read seed %v: %v", seed.Name(), err)
- }
- mgr.seeds = append(mgr.seeds, data)
- }
- }
- close(mgr.corpusPreloaded)
-}
-
-func (mgr *Manager) loadCorpus() []fuzzer.Candidate {
- <-mgr.corpusPreloaded
+ mgr.fresh = len(mgr.corpusDB.Records) == 0
// By default we don't re-minimize/re-smash programs from corpus,
// it takes lots of time on start and is unnecessary.
// However, on version bumps we can selectively re-minimize/re-smash.
- flags := fuzzer.ProgFromCorpus | fuzzer.ProgMinimized | fuzzer.ProgSmashed
+ corpusFlags := fuzzer.ProgFromCorpus | fuzzer.ProgMinimized | fuzzer.ProgSmashed
switch mgr.corpusDB.Version {
case 0:
// Version 0 had broken minimization, so we need to re-minimize.
- flags &= ^fuzzer.ProgMinimized
+ corpusFlags &= ^fuzzer.ProgMinimized
fallthrough
case 1:
// Version 1->2: memory is preallocated so lots of mmaps become unnecessary.
- flags &= ^fuzzer.ProgMinimized
+ corpusFlags &= ^fuzzer.ProgMinimized
fallthrough
case 2:
// Version 2->3: big-endian hints.
- flags &= ^fuzzer.ProgSmashed
+ corpusFlags &= ^fuzzer.ProgSmashed
fallthrough
case 3:
// Version 3->4: to shake things up.
- flags &= ^fuzzer.ProgMinimized
+ corpusFlags &= ^fuzzer.ProgMinimized
fallthrough
case 4:
// Version 4->5: fix for comparison argument sign extension.
- flags &= ^fuzzer.ProgSmashed
+ corpusFlags &= ^fuzzer.ProgSmashed
fallthrough
case currentDBVersion:
}
- var candidates []fuzzer.Candidate
- broken := 0
- for key, rec := range mgr.corpusDB.Records {
- drop, item := mgr.loadProg(rec.Val, flags)
- if drop {
- mgr.corpusDB.Delete(key)
- broken++
+ type Input struct {
+ IsSeed bool
+ Key string
+ Data []byte
+ Prog *prog.Prog
+ }
+ procs := runtime.GOMAXPROCS(0)
+ inputs := make(chan *Input, procs)
+ outputs := make(chan *Input, procs)
+ var wg sync.WaitGroup
+ wg.Add(procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ defer wg.Done()
+ for inp := range inputs {
+ inp.Prog, _ = loadProg(mgr.target, inp.Data)
+ outputs <- inp
+ }
+ }()
+ }
+ go func() {
+ wg.Wait()
+ close(outputs)
+ }()
+ go func() {
+ for key, rec := range mgr.corpusDB.Records {
+ inputs <- &Input{
+ Key: key,
+ Data: rec.Val,
+ }
}
- if item != nil {
- candidates = append(candidates, *item)
+ seedDir := filepath.Join(mgr.cfg.Syzkaller, "sys", mgr.cfg.TargetOS, "test")
+ if osutil.IsExist(seedDir) {
+ seeds, err := os.ReadDir(seedDir)
+ if err != nil {
+ log.Fatalf("failed to read seeds dir: %v", err)
+ }
+ for _, seed := range seeds {
+ data, err := os.ReadFile(filepath.Join(seedDir, seed.Name()))
+ if err != nil {
+ log.Fatalf("failed to read seed %v: %v", seed.Name(), err)
+ }
+ inputs <- &Input{
+ IsSeed: true,
+ Data: data,
+ }
+ }
}
- }
- mgr.fresh = len(mgr.corpusDB.Records) == 0
- seeds := 0
- for _, seed := range mgr.seeds {
- // Seeds are not considered "from corpus" (won't be rerun multiple times)
- // b/c they are tried on every start anyway.
- _, item := mgr.loadProg(seed, fuzzer.ProgMinimized)
- if item == nil {
+ close(inputs)
+ }()
+ brokenSeeds := 0
+ var brokenCorpus []string
+ var candidates []fuzzer.Candidate
+ for inp := range outputs {
+ if inp.Prog == nil {
+ if inp.IsSeed {
+ brokenSeeds++
+ } else {
+ brokenCorpus = append(brokenCorpus, inp.Key)
+ }
continue
}
- if _, ok := mgr.corpusDB.Records[hash.String(item.Prog.Serialize())]; ok {
- continue
+ flags := corpusFlags
+ if inp.IsSeed {
+ if _, ok := mgr.corpusDB.Records[hash.String(inp.Prog.Serialize())]; ok {
+ continue
+ }
+ // Seeds are not considered "from corpus" (won't be rerun multiple times)
+ // b/c they are tried on every start anyway.
+ flags = fuzzer.ProgMinimized
}
- candidates = append(candidates, *item)
- seeds++
+ candidates = append(candidates, fuzzer.Candidate{
+ Prog: inp.Prog,
+ Flags: flags,
+ })
}
- log.Logf(0, "%-24v: %v (%v broken, %v seeds)", "corpus", len(candidates), broken, seeds)
- mgr.seeds = nil
-
- return candidates
+ if len(brokenCorpus)+brokenSeeds != 0 {
+ log.Logf(0, "broken programs in the corpus: %v, broken seeds: %v", len(brokenCorpus), brokenSeeds)
+ }
+ // This needs to be done outside of the loop above to not race with mgr.corpusDB reads.
+ for _, sig := range brokenCorpus {
+ mgr.corpusDB.Delete(sig)
+ }
+ mgr.corpusPreload <- candidates
}
-// Returns (delete item from the corpus, a fuzzer.Candidate object).
-func (mgr *Manager) loadProg(data []byte, flags fuzzer.ProgFlags) (drop bool, candidate *fuzzer.Candidate) {
- p, disabled, bad := parseProgram(mgr.target, mgr.targetEnabledSyscalls, data)
- if bad != nil {
- return true, nil
- }
- if disabled {
- if mgr.cfg.PreserveCorpus {
- // This program contains a disabled syscall.
- // We won't execute it, but remember its hash so
- // it is not deleted during minimization.
- mgr.disabledHashes[hash.String(data)] = struct{}{}
- } else {
- // We cut out the disabled syscalls and let syz-fuzzer retriage and
- // minimize what remains from the prog. The original prog will be
- // deleted from the corpus.
- leftover := programLeftover(mgr.target, mgr.targetEnabledSyscalls, data)
- if leftover != nil {
- candidate = &fuzzer.Candidate{
- Prog: leftover,
- Flags: flags & ^fuzzer.ProgMinimized,
- }
+func (mgr *Manager) loadCorpus() []fuzzer.Candidate {
+ seeds := 0
+ var candidates []fuzzer.Candidate
+ for _, item := range <-mgr.corpusPreload {
+ if containsDisabled(item.Prog, mgr.targetEnabledSyscalls) {
+ if mgr.cfg.PreserveCorpus {
+ // This program contains a disabled syscall.
+ // We won't execute it, but remember its hash so
+ // it is not deleted during minimization.
+ mgr.disabledHashes[hash.String(item.Prog.Serialize())] = struct{}{}
+ continue
+ }
+ // We cut out the disabled syscalls and retriage/minimize what remains from the prog.
+ // The original prog will be deleted from the corpus.
+ item.Flags &= ^fuzzer.ProgMinimized
+ programLeftover(mgr.target, mgr.targetEnabledSyscalls, item.Prog)
+ if len(item.Prog.Calls) == 0 {
+ continue
}
}
- return false, candidate
- }
- return false, &fuzzer.Candidate{
- Prog: p,
- Flags: flags,
+ if item.Flags&fuzzer.ProgFromCorpus == 0 {
+ seeds++
+ }
+ candidates = append(candidates, item)
}
+ log.Logf(0, "%-24v: %v (%v seeds)", "corpus", len(candidates), seeds)
+ return candidates
}
-func programLeftover(target *prog.Target, enabled map[*prog.Syscall]bool, data []byte) *prog.Prog {
- p, err := target.Deserialize(data, prog.NonStrict)
- if err != nil {
- panic(fmt.Sprintf("subsequent deserialization failed: %s", data))
- }
+func programLeftover(target *prog.Target, enabled map[*prog.Syscall]bool, p *prog.Prog) {
for i := 0; i < len(p.Calls); {
c := p.Calls[i]
if !enabled[c.Meta] {
@@ -754,41 +797,42 @@ func programLeftover(target *prog.Target, enabled map[*prog.Syscall]bool, data [
}
i++
}
- return p
}
-func parseProgram(target *prog.Target, enabled map[*prog.Syscall]bool, data []byte) (
- p *prog.Prog, disabled bool, err error) {
- p, err = target.Deserialize(data, prog.NonStrict)
+func loadProg(target *prog.Target, data []byte) (*prog.Prog, error) {
+ p, err := target.Deserialize(data, prog.NonStrict)
if err != nil {
- return
+ return nil, err
}
if len(p.Calls) > prog.MaxCalls {
- return nil, false, fmt.Errorf("longer than %d calls", prog.MaxCalls)
+ return nil, fmt.Errorf("longer than %d calls", prog.MaxCalls)
}
// For some yet unknown reasons, programs with fail_nth > 0 may sneak in. Ignore them.
for _, call := range p.Calls {
if call.Props.FailNth > 0 {
- return nil, false, fmt.Errorf("input has fail_nth > 0")
+ return nil, fmt.Errorf("input has fail_nth > 0")
}
}
+ return p, nil
+}
+
+func containsDisabled(p *prog.Prog, enabled map[*prog.Syscall]bool) bool {
for _, c := range p.Calls {
if !enabled[c.Meta] {
- return p, true, nil
+ return true
}
}
- return p, false, nil
+ return false
}
func (mgr *Manager) runInstance(index int) (*Crash, error) {
mgr.checkUsedFiles()
- // Use unique instance names to prevent name collisions in case of untimely RPC messages.
- instanceName := fmt.Sprintf("vm-%d", mgr.nextInstanceID.Add(1))
+ instanceName := fmt.Sprintf("vm-%d", index)
injectExec := make(chan bool, 10)
- mgr.serv.createInstance(instanceName, injectExec)
+ mgr.serv.CreateInstance(instanceName, injectExec)
rep, vmInfo, err := mgr.runInstanceInner(index, instanceName, injectExec)
- lastExec, machineInfo := mgr.serv.shutdownInstance(instanceName, rep != nil)
+ lastExec, machineInfo := mgr.serv.ShutdownInstance(instanceName, rep != nil)
if rep != nil {
prependExecuting(rep, lastExec)
if len(vmInfo) != 0 {
@@ -838,16 +882,11 @@ func (mgr *Manager) runInstanceInner(index int, instanceName string, injectExec
}
defer inst.Close()
- fwdAddr, err := inst.Forward(mgr.serv.port)
+ fwdAddr, err := inst.Forward(mgr.serv.Port)
if err != nil {
return nil, nil, fmt.Errorf("failed to setup port forwarding: %w", err)
}
- fuzzerBin, err := inst.Copy(mgr.cfg.FuzzerBin)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to copy binary: %w", err)
- }
-
// If ExecutorBin is provided, it means that syz-executor is already in the image,
// so no need to copy it.
executorBin := mgr.sysTarget.ExecutorBin
@@ -858,9 +897,7 @@ func (mgr *Manager) runInstanceInner(index int, instanceName string, injectExec
}
}
- fuzzerV := 0
if *flagDebug {
- fuzzerV = 100
mgr.cfg.Procs = 1
}
@@ -868,31 +905,15 @@ func (mgr *Manager) runInstanceInner(index int, instanceName string, injectExec
mgr.bootTime.Save(time.Since(start))
start = time.Now()
- args := &instance.FuzzerCmdArgs{
- Fuzzer: fuzzerBin,
- Executor: executorBin,
- Name: instanceName,
- OS: mgr.cfg.TargetOS,
- Arch: mgr.cfg.TargetArch,
- FwdAddr: fwdAddr,
- Sandbox: mgr.cfg.Sandbox,
- Verbosity: fuzzerV,
- Cover: mgr.cfg.Cover,
- Debug: *flagDebug,
- Optional: &instance.OptionalFuzzerArgs{
- Slowdown: mgr.cfg.Timeouts.Slowdown,
- SandboxArg: mgr.cfg.SandboxArg,
- PprofPort: inst.PprofPort(),
- },
- }
- cmd := instance.FuzzerCmd(args)
+ addrPort := strings.Split(fwdAddr, ":")
+ cmd := fmt.Sprintf("%v runner %v %v %v", executorBin, instanceName, addrPort[0], addrPort[1])
_, rep, err := inst.Run(mgr.cfg.Timeouts.VMRunningTime, mgr.reporter, cmd,
vm.ExitTimeout, vm.StopChan(mgr.vmStop), vm.InjectExecuting(injectExec),
vm.EarlyFinishCb(func() {
// Depending on the crash type and kernel config, fuzzing may continue
// running for several seconds even after kernel has printed a crash report.
// This litters the log and we want to prevent it.
- mgr.serv.stopFuzzing(instanceName)
+ mgr.serv.StopFuzzing(instanceName)
}),
)
if err != nil {
@@ -910,11 +931,11 @@ func (mgr *Manager) runInstanceInner(index int, instanceName string, injectExec
return rep, vmInfo, nil
}
-func prependExecuting(rep *report.Report, lastExec []ExecRecord) {
+func prependExecuting(rep *report.Report, lastExec []rpcserver.ExecRecord) {
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "last executing test programs:\n\n")
for _, exec := range lastExec {
- fmt.Fprintf(buf, "%v ago: executing program %v:\n%s\n", exec.Time, exec.Proc, exec.Prog)
+ fmt.Fprintf(buf, "%v ago: executing program %v (id=%v):\n%s\n", exec.Time, exec.Proc, exec.ID, exec.Prog)
}
fmt.Fprintf(buf, "kernel console output (not intermixed with test programs):\n\n")
rep.Output = append(buf.Bytes(), rep.Output...)
@@ -1079,7 +1100,7 @@ func (mgr *Manager) needRepro(crash *Crash) bool {
if crash.fromHub || crash.fromDashboard {
return true
}
- if !mgr.checkDone || (mgr.enabledFeatures&flatrpc.FeatureLeak != 0 &&
+ if !mgr.checkDone.Load() || (mgr.enabledFeatures&flatrpc.FeatureLeak != 0 &&
crash.Type != crash_pkg.MemoryLeak) {
// Leak checking is very slow, don't bother reproducing other crashes on leak instance.
return false
@@ -1291,7 +1312,16 @@ func fullReproLog(stats *repro.Stats) []byte {
func (mgr *Manager) corpusInputHandler(updates <-chan corpus.NewItemEvent) {
for update := range updates {
- mgr.serv.updateCoverFilter(update.NewCover)
+ if len(update.NewCover) != 0 && mgr.coverFilter != nil {
+ filtered := 0
+ for _, pc := range update.NewCover {
+ pc = backend.PreviousInstructionPC(mgr.cfg.SysTarget, mgr.cfg.Type, pc)
+ if _, ok := mgr.coverFilter[pc]; ok {
+ filtered++
+ }
+ }
+ mgr.statCoverFiltered.Add(filtered)
+ }
if update.Exists {
// We only save new progs into the corpus.db file.
continue
@@ -1417,24 +1447,22 @@ func (mgr *Manager) collectSyscallInfo() map[string]*corpus.CallCov {
return calls
}
-func (mgr *Manager) currentBugFrames() BugFrames {
+func (mgr *Manager) BugFrames() (leaks, races []string) {
mgr.mu.Lock()
defer mgr.mu.Unlock()
- frames := BugFrames{
- memoryLeaks: make([]string, 0, len(mgr.memoryLeakFrames)),
- dataRaces: make([]string, 0, len(mgr.dataRaceFrames)),
- }
for frame := range mgr.memoryLeakFrames {
- frames.memoryLeaks = append(frames.memoryLeaks, frame)
+ leaks = append(leaks, frame)
}
for frame := range mgr.dataRaceFrames {
- frames.dataRaces = append(frames.dataRaces, frame)
+ races = append(races, frame)
}
- return frames
+ return
}
-func (mgr *Manager) machineChecked(features flatrpc.Feature, enabledSyscalls map[*prog.Syscall]bool,
- opts flatrpc.ExecOpts) queue.Source {
+func (mgr *Manager) MachineChecked(features flatrpc.Feature, enabledSyscalls map[*prog.Syscall]bool) queue.Source {
+ if len(enabledSyscalls) == 0 {
+ log.Fatalf("all system calls are disabled")
+ }
if mgr.mode == ModeSmokeTest {
mgr.exit("smoke test")
}
@@ -1444,25 +1472,23 @@ func (mgr *Manager) machineChecked(features flatrpc.Feature, enabledSyscalls map
if mgr.phase != phaseInit {
panic("machineChecked() called not during phaseInit")
}
- if mgr.checkDone {
- panic("machineChecked() called twice")
+ if mgr.checkDone.Swap(true) {
+ panic("MachineChecked called twice")
}
- mgr.checkDone = true
mgr.enabledFeatures = features
mgr.targetEnabledSyscalls = enabledSyscalls
mgr.firstConnect.Store(time.Now().Unix())
statSyscalls := stats.Create("syscalls", "Number of enabled syscalls",
stats.Simple, stats.NoGraph, stats.Link("/syscalls"))
statSyscalls.Add(len(enabledSyscalls))
-
corpus := mgr.loadCorpus()
mgr.phase = phaseLoadedCorpus
+ opts := mgr.defaultExecOpts()
if mgr.mode == ModeFuzzing {
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
fuzzerObj := fuzzer.NewFuzzer(context.Background(), &fuzzer.Config{
Corpus: mgr.corpus,
- BaseOpts: opts,
Coverage: mgr.cfg.Cover,
FaultInjection: features&flatrpc.FeatureFault != 0,
Comparisons: features&flatrpc.FeatureComparisons != 0,
@@ -1493,20 +1519,39 @@ func (mgr *Manager) machineChecked(features flatrpc.Feature, enabledSyscalls map
go mgr.dashboardReproTasks()
}
}
- return fuzzerObj
+ return queue.DefaultOpts(fuzzerObj, opts)
} else if mgr.mode == ModeCorpusRun {
- return &corpusRunner{
+ ctx := &corpusRunner{
candidates: corpus,
- opts: opts,
rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
}
+ return queue.DefaultOpts(ctx, opts)
+ } else if mgr.mode == ModeRunTests {
+ ctx := &runtest.Context{
+ Dir: filepath.Join(mgr.cfg.Syzkaller, "sys", mgr.cfg.Target.OS, "test"),
+ Target: mgr.cfg.Target,
+ Features: features,
+ EnabledCalls: map[string]map[*prog.Syscall]bool{
+ mgr.cfg.Sandbox: enabledSyscalls,
+ },
+ LogFunc: func(text string) { fmt.Println(text) },
+ Verbose: true,
+ Debug: *flagDebug,
+ }
+ go func() {
+ err := ctx.Run()
+ if err != nil {
+ log.Fatal(err)
+ }
+ mgr.exit("tests")
+ }()
+ return ctx
}
panic(fmt.Sprintf("unexpected mode %q", mgr.mode))
}
type corpusRunner struct {
candidates []fuzzer.Candidate
- opts flatrpc.ExecOpts
mu sync.Mutex
rnd *rand.Rand
seq int
@@ -1527,11 +1572,35 @@ func (cr *corpusRunner) Next() *queue.Request {
}
return &queue.Request{
Prog: p,
- ExecOpts: cr.opts,
Important: true,
}
}
+func (mgr *Manager) defaultExecOpts() flatrpc.ExecOpts {
+ env := csource.FeaturesToFlags(mgr.enabledFeatures, nil)
+ if mgr.cfg.Experimental.ResetAccState {
+ env |= flatrpc.ExecEnvResetState
+ }
+ if mgr.cfg.Cover {
+ env |= flatrpc.ExecEnvSignal
+ }
+ sandbox, err := flatrpc.SandboxToFlags(mgr.cfg.Sandbox)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse sandbox: %v", err))
+ }
+ env |= sandbox
+
+ exec := flatrpc.ExecFlagThreaded
+ if !mgr.cfg.RawCover {
+ exec |= flatrpc.ExecFlagDedupCover
+ }
+ return flatrpc.ExecOpts{
+ EnvFlags: env,
+ ExecFlags: exec,
+ SandboxArg: mgr.cfg.SandboxArg,
+ }
+}
+
func (mgr *Manager) corpusMinimization() {
for range time.NewTicker(time.Minute).C {
mgr.mu.Lock()
@@ -1540,7 +1609,7 @@ func (mgr *Manager) corpusMinimization() {
}
}
-func (mgr *Manager) maxSignal() signal.Signal {
+func (mgr *Manager) MaxSignal() signal.Signal {
if fuzzer := mgr.fuzzer.Load(); fuzzer != nil {
return fuzzer.Cover.CopyMaxSignal()
}
@@ -1559,7 +1628,7 @@ func (mgr *Manager) fuzzerSignalRotation() {
lastExecTotal := 0
lastRotation := time.Now()
for range time.NewTicker(5 * time.Minute).C {
- if mgr.statExecs.Val()-lastExecTotal < execsBetweenRotates {
+ if mgr.serv.StatExecs.Val()-lastExecTotal < execsBetweenRotates {
continue
}
if time.Since(lastRotation) < timeBetweenRotates {
@@ -1567,7 +1636,7 @@ func (mgr *Manager) fuzzerSignalRotation() {
}
mgr.fuzzer.Load().RotateMaxSignal(rotateSignals)
lastRotation = time.Now()
- lastExecTotal = mgr.statExecs.Val()
+ lastExecTotal = mgr.serv.StatExecs.Val()
}
}
@@ -1579,7 +1648,7 @@ func (mgr *Manager) fuzzerLoop(fuzzer *fuzzer.Fuzzer) {
log.Logf(2, "distributing %d new signal, %d dropped signal",
len(newSignal), len(dropSignal))
if len(newSignal)+len(dropSignal) != 0 {
- mgr.serv.distributeSignalDelta(newSignal, dropSignal)
+ mgr.serv.DistributeSignalDelta(newSignal, dropSignal)
}
}
@@ -1591,7 +1660,7 @@ func (mgr *Manager) fuzzerLoop(fuzzer *fuzzer.Fuzzer) {
mgr.mu.Lock()
if mgr.phase == phaseLoadedCorpus {
if mgr.enabledFeatures&flatrpc.FeatureLeak != 0 {
- mgr.serv.startLeakChecking()
+ mgr.serv.TriagedCorpus()
}
go mgr.fuzzerSignalRotation()
if mgr.cfg.HubClient != "" {
@@ -1637,7 +1706,6 @@ func (mgr *Manager) collectUsedFiles() {
mgr.usedFiles[f] = stat.ModTime()
}
cfg := mgr.cfg
- addUsedFile(cfg.FuzzerBin)
addUsedFile(cfg.ExecprogBin)
addUsedFile(cfg.ExecutorBin)
addUsedFile(cfg.SSHKey)
@@ -1683,7 +1751,7 @@ func (mgr *Manager) dashboardReporter() {
FuzzingTime: time.Duration(mgr.statFuzzingTime.Val()) - lastFuzzingTime,
Crashes: uint64(mgr.statCrashes.Val()) - lastCrashes,
SuppressedCrashes: uint64(mgr.statSuppressed.Val()) - lastSuppressedCrashes,
- Execs: uint64(mgr.statExecs.Val()) - lastExecs,
+ Execs: uint64(mgr.serv.StatExecs.Val()) - lastExecs,
}
if mgr.phase >= phaseTriagedCorpus && !triageInfoSent {
triageInfoSent = true
diff --git a/syz-manager/stats.go b/syz-manager/stats.go
index 322782c30..0f49c33db 100644
--- a/syz-manager/stats.go
+++ b/syz-manager/stats.go
@@ -13,20 +13,18 @@ import (
type Stats struct {
statNumReproducing *stats.Val
- statExecs *stats.Val
statCrashes *stats.Val
statCrashTypes *stats.Val
statSuppressed *stats.Val
statUptime *stats.Val
statFuzzingTime *stats.Val
statAvgBootTime *stats.Val
+ statCoverFiltered *stats.Val
}
func (mgr *Manager) initStats() {
mgr.statNumReproducing = stats.Create("reproducing", "Number of crashes being reproduced",
stats.Console, stats.NoGraph)
- mgr.statExecs = stats.Create("exec total", "Total test program executions",
- stats.Console, stats.Rate{}, stats.Prometheus("syz_exec_total"))
mgr.statCrashes = stats.Create("crashes", "Total number of VM crashes",
stats.Simple, stats.Prometheus("syz_crash_total"))
mgr.statCrashTypes = stats.Create("crash types", "Number of unique crashes types",
@@ -70,4 +68,5 @@ func (mgr *Manager) initStats() {
}, func(v int, period time.Duration) string {
return fmt.Sprintf("%v MB", v>>20)
})
+ mgr.statCoverFiltered = stats.Create("filtered coverage", "", stats.NoGraph)
}
diff --git a/syz-runner/runner.go b/syz-runner/runner.go
deleted file mode 100644
index 5a9248611..000000000
--- a/syz-runner/runner.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2021 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-// TODO: switch syz-verifier to use syz-fuzzer.
-
-//go:build never
-
-package main
-
-import (
- "flag"
- "log"
- "runtime"
-
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
- "github.com/google/syzkaller/pkg/rpctype"
- "github.com/google/syzkaller/prog"
-)
-
-// Runner is responsible of running programs sent by the host via RPC and
-// reporting the execution results back to the host.
-type Runner struct {
- vrf *rpctype.RPCClient
- target *prog.Target
- opts *ipc.ExecOpts
- config *ipc.Config
- pool, vm int
- newEnv bool
-}
-
-func main() {
- flagPool := flag.Int("pool", 0, "index of pool it corresponds to")
- flagVM := flag.Int("vm", 0, "index of VM that started the Runner")
- flagAddr := flag.String("addr", "", "verifier rpc address")
- flagOS := flag.String("os", runtime.GOOS, "target OS")
- flagArch := flag.String("arch", runtime.GOARCH, "target arch")
- flagEnv := flag.Bool("new-env", true, "create a new environment for each program")
- flag.Parse()
-
- target, err := prog.GetTarget(*flagOS, *flagArch)
- if err != nil {
- log.Fatalf("failed to configure target: %v", err)
- }
-
- config, opts, err := ipcconfig.Default(target)
- if err != nil {
- log.Fatalf("failed to create default ipc config: %v", err)
- }
-
- vrf, err := rpctype.NewRPCClient(*flagAddr)
- if err != nil {
- log.Fatalf("failed to connect to verifier : %v", err)
- }
-
- rn := &Runner{
- vrf: vrf,
- target: target,
- opts: opts,
- config: config,
- pool: *flagPool,
- vm: *flagVM,
- newEnv: *flagEnv,
- }
-
- a := &rpctype.RunnerConnectArgs{
- Pool: rn.pool,
- VM: rn.vm,
- }
- r := &rpctype.RunnerConnectRes{}
- if err := vrf.Call("Verifier.Connect", a, r); err != nil {
- log.Fatalf("failed to connect to verifier: %v", err)
- }
-
- enabled := make(map[*prog.Syscall]bool)
- for _, c := range target.Syscalls {
- enabled[c] = true
- }
- if r.CheckUnsupportedCalls {
- a := &rpctype.UpdateUnsupportedArgs{Pool: rn.pool, UnsupportedCalls: nil}
- if err := vrf.Call("Verifier.UpdateUnsupported", a, nil); err != nil {
- log.Fatalf("failed to send unsupported system calls: %v", err)
- }
- }
-
- res := &rpctype.NextExchangeRes{}
- if err := rn.vrf.Call("Verifier.NextExchange", &rpctype.NextExchangeArgs{Pool: rn.pool, VM: rn.vm}, res); err != nil {
- log.Fatalf("failed to get initial program: %v", err)
- }
-
- rn.Run(res.Prog, res.ID)
-}
-
-// Run is responsible for requesting new programs from the verifier, executing them and then sending back the Result.
-// TODO: Implement functionality to execute several programs at once and send back a slice of results.
-func (rn *Runner) Run(firstProg []byte, taskID int64) {
- p, id := firstProg, taskID
-
- env, err := ipc.MakeEnv(rn.config, 0)
- if err != nil {
- log.Fatalf("failed to create initial execution environment: %v", err)
- }
-
- for {
- prog, err := rn.target.Deserialize(p, prog.NonStrict)
- if err != nil {
- log.Fatalf("failed to deserialise new program: %v", err)
- }
-
- log.Printf("executing program") // watchdog for monitor
- _, info, hanged, err := env.Exec(rn.opts, prog)
- if err != nil {
- log.Fatalf("failed to execute the program: %v", err)
- }
-
- a := &rpctype.NextExchangeArgs{
- Pool: rn.pool,
- VM: rn.vm,
- Hanged: hanged,
- Info: *info,
- ExecTaskID: id,
- }
-
- r := &rpctype.NextExchangeRes{}
- if err := rn.vrf.Call("Verifier.NextExchange", a, r); err != nil {
- log.Fatalf("failed to make exchange with verifier: %v", err)
- }
- p, id = r.Prog, r.ID
-
- if !rn.newEnv {
- continue
- }
-
- err = env.Close()
- if err != nil {
- log.Fatalf("failed to close the execution environment: %v", err)
- }
-
- env, err = ipc.MakeEnv(rn.config, 0)
- if err != nil {
- log.Fatalf("failed to create new execution environment: %v", err)
- }
- }
-}
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index 54a6f39a0..8fce0d961 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -15,6 +15,7 @@ import (
"runtime"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/google/syzkaller/pkg/cover/backend"
@@ -22,12 +23,10 @@ import (
"github.com/google/syzkaller/pkg/db"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/host"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/tool"
"github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
@@ -36,15 +35,22 @@ import (
)
var (
- flagOS = flag.String("os", runtime.GOOS, "target os")
- flagArch = flag.String("arch", runtime.GOARCH, "target arch")
- flagCoverFile = flag.String("coverfile", "", "write coverage to the file")
- flagRepeat = flag.Int("repeat", 1, "repeat execution that many times (0 for infinite loop)")
- flagProcs = flag.Int("procs", 2*runtime.NumCPU(), "number of parallel processes to execute programs")
- flagOutput = flag.Bool("output", false, "write programs and results to stdout")
- flagHints = flag.Bool("hints", false, "do a hints-generation run")
- flagEnable = flag.String("enable", "none", "enable only listed additional features")
- flagDisable = flag.String("disable", "none", "enable all additional features except listed")
+ flagOS = flag.String("os", runtime.GOOS, "target os")
+ flagArch = flag.String("arch", runtime.GOARCH, "target arch")
+ flagCoverFile = flag.String("coverfile", "", "write coverage to the file")
+ flagRepeat = flag.Int("repeat", 1, "repeat execution that many times (0 for infinite loop)")
+ flagProcs = flag.Int("procs", 2*runtime.NumCPU(), "number of parallel processes to execute programs")
+ flagOutput = flag.Bool("output", false, "write programs and results to stdout")
+ flagHints = flag.Bool("hints", false, "do a hints-generation run")
+ flagEnable = flag.String("enable", "none", "enable only listed additional features")
+ flagDisable = flag.String("disable", "none", "enable all additional features except listed")
+ flagExecutor = flag.String("executor", "./syz-executor", "path to executor binary")
+ flagThreaded = flag.Bool("threaded", true, "use threaded mode in executor")
+ flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)")
+ flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)")
+ flagSandboxArg = flag.Int("sandbox_arg", 0, "argument for sandbox runner to adjust it via config")
+ flagDebug = flag.Bool("debug", false, "debug output from executor")
+ flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
// The in the stress mode resembles simple unguided fuzzer.
// This mode can be used as an intermediate step when porting syzkaller to a new OS,
@@ -56,6 +62,8 @@ var (
flagStress = flag.Bool("stress", false, "enable stress mode (local fuzzer)")
flagSyscalls = flag.String("syscalls", "", "comma-separated list of enabled syscalls for the stress mode")
+ flagGDB = flag.Bool("gdb", false, "start executor under gdb")
+
// The following flag is only kept to let syzkaller remain compatible with older execprog versions.
// In order to test incoming patches or perform bug bisection, syz-ci must use the exact syzkaller
// version that detected the bug (as descriptions and syntax could've already been changed), and
@@ -68,7 +76,7 @@ var (
// of syzkaller, but do not process it, as there's no such functionality anymore.
// Note, however, that we do not have to do the same for `syz-prog2c`, as `collide` was there false
// by default.
- flagCollide = flag.Bool("collide", false, "(DEPRECATED) collide syscalls to provoke data races")
+ _ = flag.Bool("collide", false, "(DEPRECATED) collide syscalls to provoke data races")
)
func main() {
@@ -78,24 +86,23 @@ func main() {
csource.PrintAvailableFeaturesFlags()
}
defer tool.Init()()
- featuresFlags, err := csource.ParseFeaturesFlags(*flagEnable, *flagDisable, true)
+ target, err := prog.GetTarget(*flagOS, *flagArch)
if err != nil {
- log.Fatalf("%v", err)
+ tool.Fail(err)
}
- target, err := prog.GetTarget(*flagOS, *flagArch)
+ featureFlags, err := csource.ParseFeaturesFlags(*flagEnable, *flagDisable, true)
if err != nil {
log.Fatalf("%v", err)
}
-
- progs := loadPrograms(target, flag.Args())
- if !*flagStress && len(progs) == 0 {
- flag.Usage()
- os.Exit(1)
- }
- if *flagCollide {
- log.Logf(0, "note: setting -collide to true is deprecated now and has no effect")
+ features := flatrpc.AllFeatures
+ for feat := range flatrpc.EnumNamesFeature {
+ opt := csource.FlatRPCFeaturesToCSource[feat]
+ if opt != "" && !featureFlags[opt].Enabled {
+ features &= ^feat
+ }
}
+
var requestedSyscalls []int
if *flagStress {
syscallList := strings.Split(*flagSyscalls, ",")
@@ -104,146 +111,153 @@ func main() {
}
requestedSyscalls, err = mgrconfig.ParseEnabledSyscalls(target, syscallList, nil)
if err != nil {
- log.Fatalf("failed to parse enabled syscalls: %v", err)
+ tool.Failf("failed to parse enabled syscalls: %v", err)
}
}
- config, execOpts, syscalls, features := createConfig(target, featuresFlags, requestedSyscalls)
- var gateCallback func()
- if features&flatrpc.FeatureLeak != 0 {
- gateCallback = func() {
- output, err := osutil.RunCmd(10*time.Minute, "", config.Executor, "leak")
- if err != nil {
- os.Stdout.Write(output)
- os.Exit(1)
- }
- }
+
+ sandbox, err := flatrpc.SandboxToFlags(*flagSandbox)
+ if err != nil {
+ tool.Failf("failed to parse sandbox: %v", err)
}
- var choiceTable *prog.ChoiceTable
- if *flagStress {
- choiceTable = target.BuildChoiceTable(progs, syscalls)
+ env := sandbox
+ if *flagDebug {
+ env |= flatrpc.ExecEnvDebug
+ }
+ cover := *flagSignal || *flagHints || *flagCoverFile != ""
+ if cover {
+ env |= flatrpc.ExecEnvSignal
}
- sysTarget := targets.Get(*flagOS, *flagArch)
+ var exec flatrpc.ExecFlag
+ if *flagThreaded {
+ exec |= flatrpc.ExecFlagThreaded
+ }
+ if *flagCoverFile == "" {
+ exec |= flatrpc.ExecFlagDedupCover
+ }
+
+ progs := loadPrograms(target, flag.Args())
+ if !*flagStress && len(progs) == 0 {
+ flag.Usage()
+ os.Exit(1)
+ }
+ rpcCtx, done := context.WithCancel(context.Background())
ctx := &Context{
- target: target,
- progs: progs,
- choiceTable: choiceTable,
- config: config,
- execOpts: execOpts,
- gate: ipc.NewGate(2**flagProcs, gateCallback),
- shutdown: make(chan struct{}),
- stress: *flagStress,
- repeat: *flagRepeat,
- sysTarget: sysTarget,
- }
- var wg sync.WaitGroup
- wg.Add(*flagProcs)
- for p := 0; p < *flagProcs; p++ {
- pid := p
- go func() {
- defer wg.Done()
- ctx.run(pid)
- }()
- }
- osutil.HandleInterrupts(ctx.shutdown)
- wg.Wait()
+ target: target,
+ done: done,
+ progs: progs,
+ rs: rand.NewSource(time.Now().UnixNano()),
+ coverFile: *flagCoverFile,
+ output: *flagOutput,
+ signal: *flagSignal,
+ hints: *flagHints,
+ stress: *flagStress,
+ repeat: *flagRepeat,
+ defaultOpts: flatrpc.ExecOpts{
+ EnvFlags: env,
+ ExecFlags: exec,
+ SandboxArg: int64(*flagSandboxArg),
+ },
+ }
+
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: target,
+ Features: features,
+ Syscalls: requestedSyscalls,
+ Debug: *flagDebug,
+ Cover: cover,
+ Sandbox: sandbox,
+ SandboxArg: int64(*flagSandboxArg),
+ },
+ Procs: *flagProcs,
+ Slowdown: *flagSlowdown,
+ },
+ Executor: *flagExecutor,
+ HandleInterrupts: true,
+ GDB: *flagGDB,
+ Context: rpcCtx,
+ MachineChecked: ctx.machineChecked,
+ }
+ if err := rpcserver.RunLocal(cfg); err != nil {
+ tool.Fail(err)
+ }
}
type Context struct {
target *prog.Target
+ done func()
progs []*prog.Prog
+ defaultOpts flatrpc.ExecOpts
choiceTable *prog.ChoiceTable
- config *ipc.Config
- execOpts *flatrpc.ExecOpts
- gate *ipc.Gate
- shutdown chan struct{}
logMu sync.Mutex
posMu sync.Mutex
+ rs rand.Source
+ coverFile string
+ output bool
+ signal bool
+ hints bool
stress bool
repeat int
pos int
+ completed atomic.Uint64
+ resultIndex atomic.Int64
lastPrint time.Time
- sysTarget *targets.Target
}
-func (ctx *Context) run(pid int) {
- env, err := ipc.MakeEnv(ctx.config, pid)
- if err != nil {
- log.Fatalf("failed to create ipc env: %v", err)
- }
- defer env.Close()
- rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
- for {
- select {
- case <-ctx.shutdown:
- return
- default:
- }
- if ctx.stress {
- p := ctx.createStressProg(rs)
- ctx.execute(pid, env, p, 0)
- } else {
- idx := ctx.getProgramIndex()
- if ctx.repeat > 0 && idx >= len(ctx.progs)*ctx.repeat {
- return
- }
- p := ctx.progs[idx%len(ctx.progs)]
- ctx.execute(pid, env, p, idx)
- }
+func (ctx *Context) machineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ if ctx.stress {
+ ctx.choiceTable = ctx.target.BuildChoiceTable(ctx.progs, syscalls)
}
+ ctx.defaultOpts.EnvFlags |= csource.FeaturesToFlags(features, nil)
+ return queue.DefaultOpts(ctx, ctx.defaultOpts)
}
-func (ctx *Context) execute(pid int, env *ipc.Env, p *prog.Prog, progIndex int) {
- // Limit concurrency window.
- ticket := ctx.gate.Enter()
- defer ctx.gate.Leave(ticket)
+func (ctx *Context) Next() *queue.Request {
+ var p *prog.Prog
+ if ctx.stress {
+ p = ctx.createStressProg()
+ } else {
+ idx := ctx.getProgramIndex()
+ if idx < 0 {
+ return nil
+ }
+ p = ctx.progs[idx]
+ }
+ if ctx.output {
+ data := p.Serialize()
+ ctx.logMu.Lock()
+ log.Logf(0, "executing program:\n%s", data)
+ ctx.logMu.Unlock()
+ }
- callOpts := ctx.execOpts
- if *flagOutput {
- ctx.logProgram(pid, p)
+ req := &queue.Request{
+ Prog: p,
}
- progData, err := p.SerializeForExec()
- if err != nil {
- log.Logf(1, "RESULT: failed to serialize: %v", err)
- return
+ if ctx.hints {
+ req.ExecOpts.ExecFlags |= flatrpc.ExecFlagCollectComps
+ } else if ctx.signal || ctx.coverFile != "" {
+ req.ExecOpts.ExecFlags |= flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover
}
- // This mimics the syz-fuzzer logic. This is important for reproduction.
- for try := 0; ; try++ {
- output, info, hanged, err := env.ExecProg(callOpts, progData)
- if err != nil {
- if ctx.execOpts.EnvFlags&flatrpc.ExecEnvDebug != 0 {
- log.Logf(0, "result: hanged=%v err=%v\n\n%s", hanged, err, output)
- }
- if try > 10 {
- log.SyzFatalf("executor %d failed %d times: %v\n%s", pid, try, err, output)
- }
- // Don't print err/output in this case as it may contain "SYZFAIL" and we want to fail yet.
- log.Logf(1, "executor failed, retrying")
- if try > 3 {
- time.Sleep(100 * time.Millisecond)
- }
- continue
+ req.OnDone(ctx.Done)
+ return req
+}
+
+func (ctx *Context) Done(req *queue.Request, res *queue.Result) bool {
+ if res.Info != nil {
+ ctx.printCallResults(res.Info)
+ if ctx.hints {
+ ctx.printHints(req.Prog, res.Info)
}
- if info != nil {
- ctx.printCallResults(info)
- if *flagHints {
- ctx.printHints(p, info)
- }
- if *flagCoverFile != "" {
- covFile := fmt.Sprintf("%s_prog%d", *flagCoverFile, progIndex)
- ctx.dumpCoverage(covFile, info)
- }
- } else {
- log.Logf(1, "RESULT: no calls executed")
+ if ctx.coverFile != "" {
+ ctx.dumpCoverage(res.Info)
}
- break
}
-}
-
-func (ctx *Context) logProgram(pid int, p *prog.Prog) {
- data := p.Serialize()
- ctx.logMu.Lock()
- log.Logf(0, "executing program %v:\n%s", pid, data)
- ctx.logMu.Unlock()
+ completed := int(ctx.completed.Add(1))
+ if ctx.repeat > 0 && completed >= len(ctx.progs)*ctx.repeat {
+ ctx.done()
+ }
+ return true
}
func (ctx *Context) printCallResults(info *flatrpc.ProgInfo) {
@@ -269,20 +283,20 @@ func (ctx *Context) printCallResults(info *flatrpc.ProgInfo) {
func (ctx *Context) printHints(p *prog.Prog, info *flatrpc.ProgInfo) {
ncomps, ncandidates := 0, 0
for i := range p.Calls {
- if *flagOutput {
+ if ctx.output {
fmt.Printf("call %v:\n", i)
}
comps := make(prog.CompMap)
for _, cmp := range info.Calls[i].Comps {
comps.AddComp(cmp.Op1, cmp.Op2)
- if *flagOutput {
+ if ctx.output {
fmt.Printf("comp 0x%x ? 0x%x\n", cmp.Op1, cmp.Op2)
}
}
ncomps += len(comps)
p.MutateWithHints(i, comps, func(p *prog.Prog) bool {
ncandidates++
- if *flagOutput {
+ if ctx.output {
log.Logf(1, "PROGRAM:\n%s", p.Serialize())
}
return true
@@ -295,9 +309,10 @@ func (ctx *Context) dumpCallCoverage(coverFile string, info *flatrpc.CallInfo) {
if info == nil || len(info.Cover) == 0 {
return
}
+ sysTarget := targets.Get(ctx.target.OS, ctx.target.Arch)
buf := new(bytes.Buffer)
for _, pc := range info.Cover {
- prev := backend.PreviousInstructionPC(ctx.sysTarget, "", pc)
+ prev := backend.PreviousInstructionPC(sysTarget, "", pc)
fmt.Fprintf(buf, "0x%x\n", prev)
}
err := osutil.WriteFile(coverFile, buf.Bytes())
@@ -306,7 +321,8 @@ func (ctx *Context) dumpCallCoverage(coverFile string, info *flatrpc.CallInfo) {
}
}
-func (ctx *Context) dumpCoverage(coverFile string, info *flatrpc.ProgInfo) {
+func (ctx *Context) dumpCoverage(info *flatrpc.ProgInfo) {
+ coverFile := fmt.Sprintf("%s_prog%v", ctx.coverFile, ctx.resultIndex.Add(1))
for i, inf := range info.Calls {
log.Logf(0, "call #%v: signal %v, coverage %v", i, len(inf.Signal), len(inf.Cover))
ctx.dumpCallCoverage(fmt.Sprintf("%v.%v", coverFile, i), inf)
@@ -319,23 +335,28 @@ func (ctx *Context) dumpCoverage(coverFile string, info *flatrpc.ProgInfo) {
func (ctx *Context) getProgramIndex() int {
ctx.posMu.Lock()
- idx := ctx.pos
- ctx.pos++
- if idx%len(ctx.progs) == 0 && time.Since(ctx.lastPrint) > 5*time.Second {
- log.Logf(0, "executed programs: %v", idx)
+ defer ctx.posMu.Unlock()
+ if ctx.repeat > 0 && ctx.pos >= len(ctx.progs)*ctx.repeat {
+ return -1
+ }
+ idx := ctx.pos % len(ctx.progs)
+ if idx == 0 && time.Since(ctx.lastPrint) > 5*time.Second {
+ log.Logf(0, "executed programs: %v", ctx.pos)
ctx.lastPrint = time.Now()
}
- ctx.posMu.Unlock()
+ ctx.pos++
return idx
}
-func (ctx *Context) createStressProg(rs rand.Source) *prog.Prog {
- rnd := rand.New(rs)
+func (ctx *Context) createStressProg() *prog.Prog {
+ ctx.posMu.Lock()
+ rnd := rand.New(ctx.rs)
+ ctx.posMu.Unlock()
if len(ctx.progs) == 0 || rnd.Intn(2) == 0 {
- return ctx.target.Generate(rs, prog.RecommendedCalls, ctx.choiceTable)
+ return ctx.target.Generate(rnd, prog.RecommendedCalls, ctx.choiceTable)
}
p := ctx.progs[rnd.Intn(len(ctx.progs))].Clone()
- p.Mutate(rs, prog.RecommendedCalls, ctx.choiceTable, nil, ctx.progs)
+ p.Mutate(rnd, prog.RecommendedCalls, ctx.choiceTable, nil, ctx.progs)
return p
}
@@ -363,109 +384,3 @@ func loadPrograms(target *prog.Target, files []string) []*prog.Prog {
log.Logf(0, "parsed %v programs", len(progs))
return progs
}
-
-func createConfig(target *prog.Target, featuresFlags csource.Features, syscalls []int) (
- *ipc.Config, *flatrpc.ExecOpts, map[*prog.Syscall]bool, flatrpc.Feature) {
- config, execOpts, err := ipcconfig.Default(target)
- if err != nil {
- log.Fatalf("%v", err)
- }
- if execOpts.EnvFlags&flatrpc.ExecEnvSignal != 0 {
- execOpts.ExecFlags |= flatrpc.ExecFlagCollectCover
- }
- if *flagCoverFile != "" {
- execOpts.EnvFlags |= flatrpc.ExecEnvSignal
- execOpts.ExecFlags |= flatrpc.ExecFlagCollectCover
- execOpts.ExecFlags &^= flatrpc.ExecFlagDedupCover
- }
- if *flagHints {
- if execOpts.ExecFlags&flatrpc.ExecFlagCollectCover != 0 {
- execOpts.ExecFlags ^= flatrpc.ExecFlagCollectCover
- }
- execOpts.ExecFlags |= flatrpc.ExecFlagCollectComps
- }
- cfg := &mgrconfig.Config{
- Sandbox: ipc.FlagsToSandbox(execOpts.EnvFlags),
- SandboxArg: execOpts.SandboxArg,
- Derived: mgrconfig.Derived{
- TargetOS: target.OS,
- TargetArch: target.Arch,
- TargetVMArch: target.Arch,
- Target: target,
- SysTarget: targets.Get(target.OS, target.Arch),
- Syscalls: syscalls,
- },
- }
- checker := vminfo.New(cfg)
- fileInfos := host.ReadFiles(checker.RequiredFiles())
- featureInfos, err := host.SetupFeatures(target, config.Executor, flatrpc.AllFeatures, featuresFlags)
- if err != nil {
- log.Fatal(err)
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- debug := execOpts.EnvFlags&flatrpc.ExecEnvDebug != 0
- go checkerExecutor(ctx, checker, config, debug)
-
- enabledSyscalls, disabledSyscalls, features, err := checker.Run(fileInfos, featureInfos)
- if err != nil {
- log.Fatal(err)
- }
- if *flagOutput {
- for feat, info := range features {
- log.Logf(0, "%-24v: %v", flatrpc.EnumNamesFeature[feat], info.Reason)
- }
- for c, reason := range disabledSyscalls {
- log.Logf(0, "unsupported syscall: %v: %v", c.Name, reason)
- }
- enabledSyscalls, disabledSyscalls = target.TransitivelyEnabledCalls(enabledSyscalls)
- for c, reason := range disabledSyscalls {
- log.Logf(0, "transitively unsupported: %v: %v", c.Name, reason)
- }
- }
- execOpts.EnvFlags |= ipc.FeaturesToFlags(features.Enabled(), featuresFlags)
- return config, execOpts, enabledSyscalls, features.Enabled()
-}
-
-func checkerExecutor(ctx context.Context, source queue.Source, config *ipc.Config, debug bool) {
- env, err := ipc.MakeEnv(config, 0)
- if err != nil {
- log.Fatalf("failed to create ipc env: %v", err)
- }
- defer env.Close()
- for {
- req := source.Next()
- if req == nil {
- select {
- case <-time.After(time.Second / 100):
- case <-ctx.Done():
- return
- }
- continue
- }
- progData, err := req.Prog.SerializeForExec()
- if err != nil {
- log.Fatalf("failed to serialize %s: %v", req.Prog.Serialize(), err)
- }
- execOpts := req.ExecOpts
- if debug {
- execOpts.EnvFlags |= flatrpc.ExecEnvDebug
- }
- output, info, hanged, err := env.ExecProg(&execOpts, progData)
- res := &queue.Result{
- Status: queue.Success,
- Info: info,
- Output: output,
- Err: err,
- }
- if err != nil {
- res.Status = queue.ExecFailure
- }
- if hanged && err == nil {
- res.Status = queue.ExecFailure
- res.Err = fmt.Errorf("hanged")
- }
- req.Done(res)
- }
-}
diff --git a/tools/syz-runtest/runtest.go b/tools/syz-runtest/runtest.go
deleted file mode 100644
index 370bc3954..000000000
--- a/tools/syz-runtest/runtest.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-// TODO: fold syz-runtest into syz-manager.
-
-//go:build never
-
-// Runtest runs syzkaller test programs in sys/*/test/*. Start as:
-// $ syz-runtest -config manager.config
-// Also see pkg/runtest docs.
-package main
-
-import (
- "errors"
- "flag"
- "fmt"
- "log"
- "net"
- "os"
- "path/filepath"
- "slices"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/instance"
- "github.com/google/syzkaller/pkg/mgrconfig"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/report"
- "github.com/google/syzkaller/pkg/rpctype"
- "github.com/google/syzkaller/pkg/runtest"
- "github.com/google/syzkaller/pkg/vminfo"
- "github.com/google/syzkaller/prog"
- _ "github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/vm"
-)
-
-var (
- flagConfig = flag.String("config", "", "manager config")
- flagDebug = flag.Bool("debug", false, "debug mode")
- flagTests = flag.String("tests", "", "prefix to match test file names")
-)
-
-func main() {
- flag.Parse()
- cfg, err := mgrconfig.LoadFile(*flagConfig)
- if err != nil {
- log.Fatal(err)
- }
- vmPool, err := vm.Create(cfg, *flagDebug)
- if err != nil {
- log.Fatal(err)
- }
- reporter, err := report.NewReporter(cfg)
- if err != nil {
- log.Fatal(err)
- }
- osutil.MkdirAll(cfg.Workdir)
- mgr := &Manager{
- cfg: cfg,
- vmPool: vmPool,
- checker: vminfo.New(cfg),
- reporter: reporter,
- debug: *flagDebug,
- checkResultC: make(chan *rpctype.CheckArgs, 1),
- vmStop: make(chan bool),
- reqMap: make(map[int64]*queue.Request),
- pending: make(map[string]map[int64]bool),
- }
- mgr.checkFiles = mgr.checker.RequiredFiles()
- mgr.source = queue.DynamicSource(mgr.checker)
- s, err := rpctype.NewRPCServer(cfg.RPC, "Manager", mgr)
- if err != nil {
- log.Fatalf("failed to create rpc server: %v", err)
- }
- mgr.port = s.Addr().(*net.TCPAddr).Port
- go s.Serve()
- var wg sync.WaitGroup
- wg.Add(vmPool.Count())
- fmt.Printf("booting VMs...\n")
- var nameSeq atomic.Uint64
- for i := 0; i < vmPool.Count(); i++ {
- i := i
- go func() {
- defer wg.Done()
- for {
- name := fmt.Sprintf("vm-%v", nameSeq.Add(1))
- rep, err := mgr.boot(name, i)
- if err != nil {
- log.Fatal(err)
- }
- if rep == nil {
- return
- }
- if err := mgr.finishRequests(name, rep); err != nil {
- log.Fatal(err)
- }
- }
- }()
- }
- checkResult := <-mgr.checkResultC
- calls, _, features, err := mgr.checker.Run(checkResult.Files, checkResult.Features)
- if err != nil {
- log.Fatalf("failed to detect enabled syscalls: %v", err)
- }
- calls, _ = cfg.Target.TransitivelyEnabledCalls(calls)
- enabledCalls := make(map[string]map[*prog.Syscall]bool)
- // TODO: restore checking/testing of all other sandboxes (we used to test them).
- // Note: syz_emit_ethernet/syz_extract_tcp_res were manually disabled for "" ("no") sandbox,
- // b/c tun is not setup without sandbox.
- enabledCalls[mgr.cfg.Sandbox] = calls
- for feat, info := range features {
- fmt.Printf("%-24v: %v\n", flatrpc.EnumNamesFeature[feat], info.Reason)
- }
- for sandbox, calls := range enabledCalls {
- if sandbox == "" {
- sandbox = "no"
- }
- fmt.Printf("%-24v: %v calls enabled\n", sandbox+" sandbox", len(calls))
- }
- ctx := &runtest.Context{
- Dir: filepath.Join(cfg.Syzkaller, "sys", cfg.Target.OS, "test"),
- Target: cfg.Target,
- Features: features.Enabled(),
- EnabledCalls: enabledCalls,
- LogFunc: func(text string) { fmt.Println(text) },
- Verbose: true,
- Debug: *flagDebug,
- Tests: *flagTests,
- }
- mgr.source.Store(ctx)
- err = ctx.Run()
- close(vm.Shutdown)
- wg.Wait()
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
-
-type Manager struct {
- cfg *mgrconfig.Config
- vmPool *vm.Pool
- checker *vminfo.Checker
- checkFiles []string
- reporter *report.Reporter
- checkResultC chan *rpctype.CheckArgs
- vmStop chan bool
- port int
- debug bool
- source *queue.DynamicSourceCtl
-
- reqMu sync.Mutex
- reqSeq int64
- reqMap map[int64]*queue.Request
- pending map[string]map[int64]bool
-}
-
-func (mgr *Manager) boot(name string, index int) (*report.Report, error) {
- inst, err := mgr.vmPool.Create(index)
- if err != nil {
- return nil, fmt.Errorf("failed to create instance: %w", err)
- }
- defer inst.Close()
-
- fwdAddr, err := inst.Forward(mgr.port)
- if err != nil {
- return nil, fmt.Errorf("failed to setup port forwarding: %w", err)
- }
-
- fuzzerBin, err := inst.Copy(mgr.cfg.FuzzerBin)
- if err != nil {
- return nil, fmt.Errorf("failed to copy binary: %w", err)
- }
-
- // If SyzExecutorCmd is provided, it means that syz-executor is already in
- // the image, so no need to copy it.
- executorBin := mgr.cfg.SysTarget.ExecutorBin
- if executorBin == "" {
- executorBin, err = inst.Copy(mgr.cfg.ExecutorBin)
- if err != nil {
- return nil, fmt.Errorf("failed to copy binary: %w", err)
- }
- }
- args := &instance.FuzzerCmdArgs{
- Fuzzer: fuzzerBin,
- Executor: executorBin,
- Name: name,
- OS: mgr.cfg.TargetOS,
- Arch: mgr.cfg.TargetArch,
- FwdAddr: fwdAddr,
- Sandbox: mgr.cfg.Sandbox,
- Procs: 1,
- Verbosity: 0,
- Cover: mgr.cfg.Cover,
- Debug: mgr.debug,
- Test: false,
- Optional: &instance.OptionalFuzzerArgs{
- Slowdown: mgr.cfg.Timeouts.Slowdown,
- SandboxArg: mgr.cfg.SandboxArg,
- },
- }
- cmd := instance.FuzzerCmd(args)
- _, rep, err := inst.Run(time.Hour, mgr.reporter, cmd, vm.StopChan(mgr.vmStop))
- if err != nil {
- return nil, fmt.Errorf("failed to run fuzzer: %w", err)
- }
- return rep, nil
-}
-
-func (mgr *Manager) finishRequests(name string, rep *report.Report) error {
- mgr.reqMu.Lock()
- defer mgr.reqMu.Unlock()
- for id := range mgr.pending[name] {
- req := mgr.reqMap[id]
- if req == nil {
- return fmt.Errorf("vm crash: %v\n%s\n%s", rep.Title, rep.Report, rep.Output)
- }
- delete(mgr.reqMap, id)
- output := rep.Report
- if len(output) == 0 {
- output = rep.Output
- }
- req.Done(&queue.Result{
- Status: queue.Crashed,
- Err: fmt.Errorf("%v", rep.Title),
- Output: slices.Clone(output),
- })
- }
- delete(mgr.pending, name)
- return nil
-}
-
-func (mgr *Manager) Connect(a *rpctype.ConnectArgs, r *rpctype.ConnectRes) error {
- r.ReadFiles = append(mgr.checker.RequiredFiles(), mgr.checkFiles...)
- r.ReadGlobs = mgr.cfg.Target.RequiredGlobs()
- for feat := range flatrpc.EnumNamesFeature {
- r.Features |= feat
- }
- return nil
-}
-
-func (mgr *Manager) Check(a *rpctype.CheckArgs, r *rpctype.CheckRes) error {
- if a.Error != "" {
- log.Fatalf("machine check: %v", a.Error)
- }
- select {
- case mgr.checkResultC <- a:
- default:
- }
- return nil
-}
-
-func (mgr *Manager) ExchangeInfo(a *rpctype.ExchangeInfoRequest, r *rpctype.ExchangeInfoReply) error {
- mgr.reqMu.Lock()
- defer mgr.reqMu.Unlock()
- if mgr.pending[a.Name] == nil {
- mgr.pending[a.Name] = make(map[int64]bool)
- }
- for i := range a.Results {
- res := a.Results[i]
- if !mgr.pending[a.Name][res.ID] {
- log.Fatalf("runner %v wasn't executing request %v", a.Name, res.ID)
- }
- delete(mgr.pending[a.Name], res.ID)
- req := mgr.reqMap[res.ID]
- if req == nil {
- log.Fatalf("request %v does not exist", res.ID)
- }
- delete(mgr.reqMap, res.ID)
- if req == nil {
- log.Fatalf("got done request for unknown id %v", res.ID)
- }
- result := &queue.Result{
- Status: queue.Success,
- Info: &res.Info,
- Output: res.Output,
- }
- if res.Error != "" {
- result.Status = queue.ExecFailure
- result.Err = errors.New(res.Error)
- }
- req.Done(result)
- }
- for i := 0; i < a.NeedProgs; i++ {
- req := mgr.source.Next()
- if req == nil {
- break
- }
- mgr.reqSeq++
- mgr.reqMap[mgr.reqSeq] = req
- mgr.pending[a.Name][mgr.reqSeq] = true
- var progData []byte
- var err error
- if req.BinaryFile != "" {
- progData, err = os.ReadFile(req.BinaryFile)
- } else {
- progData, err = req.Prog.SerializeForExec()
- }
- if err != nil {
- log.Fatal(err)
- }
- r.Requests = append(r.Requests, rpctype.ExecutionRequest{
- ID: mgr.reqSeq,
- ProgData: progData,
- ExecOpts: req.ExecOpts,
- IsBinary: req.BinaryFile != "",
- ResetState: req.BinaryFile == "",
- ReturnOutput: true,
- ReturnError: true,
- Repeat: req.Repeat,
- })
- }
- return nil
-}
-
-func (mgr *Manager) StartExecuting(a *rpctype.ExecutingRequest, r *int) error {
- return nil
-}
diff --git a/vm/adb/adb.go b/vm/adb/adb.go
index a1a3d7531..ed9fa56b7 100644
--- a/vm/adb/adb.go
+++ b/vm/adb/adb.go
@@ -323,7 +323,7 @@ func (inst *instance) adbWithTimeout(timeout time.Duration, args ...string) ([]b
}
func (inst *instance) waitForBootCompletion() {
- // ADB connects to a phone and starts syz-fuzzer while the phone is still booting.
+ // ADB connects to a phone and starts syz-executor while the phone is still booting.
// This enables syzkaller to create a race condition which in certain cases doesn't
// allow the phone to finalize initialization.
// To determine whether a system has booted and started all system processes and
diff --git a/vm/gvisor/gvisor.go b/vm/gvisor/gvisor.go
index ed3018a51..6d26da5bb 100644
--- a/vm/gvisor/gvisor.go
+++ b/vm/gvisor/gvisor.go
@@ -244,14 +244,6 @@ func (inst *instance) Info() ([]byte, error) {
return []byte(info), nil
}
-func (inst *instance) PprofPort() int {
- // Some of the gVisor instances use the host's network namespace, which
- // results in conflicting bind operations on the same HTTP port.
- // Until there's an actual need to debug gVisor VMs with pprof, let's
- // just disable it.
- return 0
-}
-
func (inst *instance) runscCmd(add ...string) *exec.Cmd {
cmd := osutil.Command(inst.image, append(inst.args(), add...)...)
cmd.Env = []string{
diff --git a/vm/qemu/qemu.go b/vm/qemu/qemu.go
index 6f5cb4f56..738ff5acf 100644
--- a/vm/qemu/qemu.go
+++ b/vm/qemu/qemu.go
@@ -436,13 +436,9 @@ func (inst *instance) boot() error {
templateDir := filepath.Join(inst.workdir, "template")
args = append(args, splitArgs(inst.cfg.QemuArgs, templateDir, inst.index)...)
- forwardedPort := vmimpl.UnusedTCPPort()
- pprofExt := fmt.Sprintf(",hostfwd=tcp::%v-:%v", forwardedPort, vmimpl.PprofPort)
- log.Logf(3, "instance %s's pprof is available at 127.0.0.1:%v", instanceName, forwardedPort)
-
args = append(args,
"-device", inst.cfg.NetDev+",netdev=net0",
- "-netdev", fmt.Sprintf("user,id=net0,restrict=on,hostfwd=tcp:127.0.0.1:%v-:22%s", inst.port, pprofExt),
+ "-netdev", fmt.Sprintf("user,id=net0,restrict=on,hostfwd=tcp:127.0.0.1:%v-:22", inst.port),
)
if inst.image == "9p" {
args = append(args,
@@ -617,7 +613,7 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
base := filepath.Base(hostSrc)
vmDst := filepath.Join(inst.targetDir(), base)
if inst.target.HostFuzzer {
- if base == "syz-fuzzer" || base == "syz-execprog" {
+ if base == "syz-execprog" {
return hostSrc, nil // we will run these on host
}
if inst.files == nil {
@@ -648,8 +644,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
sshArgs := vmimpl.SSHArgsForward(inst.debug, inst.sshkey, inst.port, inst.forwardPort, false)
args := strings.Split(command, " ")
- if bin := filepath.Base(args[0]); inst.target.HostFuzzer &&
- (bin == "syz-fuzzer" || bin == "syz-execprog") {
+ if bin := filepath.Base(args[0]); inst.target.HostFuzzer && bin == "syz-execprog" {
// Weird mode for Fuchsia.
// Fuzzer and execprog are on host (we did not copy them), so we will run them as is,
// but we will also wrap executor with ssh invocation.
diff --git a/vm/vm.go b/vm/vm.go
index 02b0e1030..6f66f8a37 100644
--- a/vm/vm.go
+++ b/vm/vm.go
@@ -253,18 +253,6 @@ func (inst *Instance) Info() ([]byte, error) {
return nil, nil
}
-func (inst *Instance) PprofPort() int {
- if inst.pool.hostFuzzer {
- // In the fuzzing on host mode, fuzzers are always on the same network.
- // Don't set up pprof endpoints in this case.
- return 0
- }
- if ii, ok := inst.impl.(vmimpl.PprofPortProvider); ok {
- return ii.PprofPort()
- }
- return vmimpl.PprofPort
-}
-
func (inst *Instance) diagnose(rep *report.Report) ([]byte, bool) {
if rep == nil {
panic("rep is nil")
@@ -353,8 +341,7 @@ func (mon *monitor) monitorExecution() *report.Report {
func (mon *monitor) appendOutput(out []byte) (*report.Report, bool) {
lastPos := len(mon.output)
mon.output = append(mon.output, out...)
- if bytes.Contains(mon.output[lastPos:], executingProgram1) ||
- bytes.Contains(mon.output[lastPos:], executingProgram2) {
+ if bytes.Contains(mon.output[lastPos:], executingProgram) {
mon.lastExecuteTime = time.Now()
}
if mon.reporter.ContainsCrash(mon.output[mon.matchPos:]) {
@@ -402,7 +389,7 @@ func (mon *monitor) extractError(defaultError string) *report.Report {
if defaultError != noOutputCrash || diagWait {
mon.waitForOutput()
}
- if bytes.Contains(mon.output, []byte(fuzzerPreemptedStr)) {
+ if bytes.Contains(mon.output, []byte(executorPreemptedStr)) {
return nil
}
if defaultError == "" && mon.reporter.ContainsCrash(mon.output[mon.matchPos:]) {
@@ -470,16 +457,15 @@ func (mon *monitor) waitForOutput() {
const (
maxErrorLength = 256
- lostConnectionCrash = "lost connection to test machine"
- noOutputCrash = "no output from test machine"
- timeoutCrash = "timed out"
- fuzzerPreemptedStr = "SYZ-FUZZER: PREEMPTED"
- vmDiagnosisStart = "\nVM DIAGNOSIS:\n"
+ lostConnectionCrash = "lost connection to test machine"
+ noOutputCrash = "no output from test machine"
+ timeoutCrash = "timed out"
+ executorPreemptedStr = "SYZ-EXECUTOR: PREEMPTED"
+ vmDiagnosisStart = "\nVM DIAGNOSIS:\n"
)
var (
- executingProgram1 = []byte("executing program") // syz-fuzzer, syz-runner output
- executingProgram2 = []byte("executed programs:") // syz-execprog output
+ executingProgram = []byte("executed programs:") // syz-execprog output
beforeContextDefault = 128 << 10
afterContext = 128 << 10
diff --git a/vm/vm_test.go b/vm/vm_test.go
index 4f0e2836d..afb8634bd 100644
--- a/vm/vm_test.go
+++ b/vm/vm_test.go
@@ -191,7 +191,7 @@ var tests = []*Test{
Name: "fuzzer-is-preempted",
Body: func(outc chan []byte, errc chan error) {
outc <- []byte("BUG: bad\n")
- outc <- []byte(fuzzerPreemptedStr + "\n")
+ outc <- []byte(executorPreemptedStr + "\n")
},
},
{
@@ -263,23 +263,12 @@ var tests = []*Test{
},
},
{
- Name: "no-no-output-1",
+ Name: "no-no-output",
Exit: ExitNormal,
Body: func(outc chan []byte, errc chan error) {
for i := 0; i < 5; i++ {
time.Sleep(time.Second)
- outc <- append(executingProgram1, '\n')
- }
- errc <- nil
- },
- },
- {
- Name: "no-no-output-2",
- Exit: ExitNormal,
- Body: func(outc chan []byte, errc chan error) {
- for i := 0; i < 5; i++ {
- time.Sleep(time.Second)
- outc <- append(executingProgram2, '\n')
+ outc <- append(executingProgram, '\n')
}
errc <- nil
},
diff --git a/vm/vmimpl/vmimpl.go b/vm/vmimpl/vmimpl.go
index a9afdc1f1..0a4ada028 100644
--- a/vm/vmimpl/vmimpl.go
+++ b/vm/vmimpl/vmimpl.go
@@ -67,11 +67,6 @@ type Infoer interface {
Info() ([]byte, error)
}
-// PprofPortProvider is used when the instance wants to define a custom pprof port.
-type PprofPortProvider interface {
- PprofPort() int
-}
-
// Env contains global constant parameters for a pool of VMs.
type Env struct {
// Unique name
@@ -191,9 +186,6 @@ func Multiplex(cmd *exec.Cmd, merger *OutputMerger, console io.Closer, timeout t
return merger.Output, errc, nil
}
-// On VMs, pprof will be listening to this port.
-const PprofPort = 6060
-
func RandomPort() int {
n, err := rand.Int(rand.Reader, big.NewInt(64<<10-1<<10))
if err != nil {