aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--executor/common_test.h2
-rw-r--r--executor/executor.cc83
-rw-r--r--executor/executor_bsd.h15
-rw-r--r--executor/executor_darwin.h15
-rw-r--r--executor/executor_linux.h78
-rw-r--r--executor/executor_runner.h28
-rw-r--r--executor/executor_test.h63
-rw-r--r--executor/nocover.h15
-rw-r--r--pkg/flatrpc/flatrpc.fbs2
-rw-r--r--pkg/flatrpc/flatrpc.go92
-rw-r--r--pkg/flatrpc/flatrpc.h50
-rw-r--r--pkg/rpcserver/local.go5
-rw-r--r--pkg/rpcserver/rpcserver.go65
-rw-r--r--pkg/rpcserver/runner.go55
-rw-r--r--pkg/runtest/executor_test.go4
-rw-r--r--pkg/runtest/run_test.go60
-rw-r--r--sys/targets/targets.go29
-rw-r--r--sys/test/exec.txt4
18 files changed, 346 insertions, 319 deletions
diff --git a/executor/common_test.h b/executor/common_test.h
index d580ae2ff..dc39fa326 100644
--- a/executor/common_test.h
+++ b/executor/common_test.h
@@ -170,7 +170,7 @@ static long syz_test_fuzzer1(volatile long a, volatile long b, volatile long c)
#endif
#if SYZ_EXECUTOR || __NR_syz_inject_cover
-static long syz_inject_cover(volatile long a, volatile long b, volatile long c)
+static long syz_inject_cover(volatile long a, volatile long b)
#if SYZ_EXECUTOR
; // defined in executor_test.h
#else
diff --git a/executor/executor.cc b/executor/executor.cc
index 616e86752..fb5d242de 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -314,7 +314,8 @@ const uint64 no_copyout = -1;
static int running;
static uint32 completed;
-static bool is_kernel_64_bit = true;
+static bool is_kernel_64_bit;
+static bool use_cover_edges;
static uint8* input_data;
@@ -395,20 +396,20 @@ const uint64 kInMagic = 0xbadc0ffeebadface;
struct handshake_req {
uint64 magic;
+ bool use_cover_edges;
+ bool is_kernel_64_bit;
rpc::ExecEnv flags;
uint64 pid;
uint64 sandbox_arg;
+ uint64 syscall_timeout_ms;
+ uint64 program_timeout_ms;
+ uint64 slowdown_scale;
};
struct execute_req {
uint64 magic;
uint64 id;
- rpc::ExecEnv env_flags;
uint64 exec_flags;
- uint64 pid;
- uint64 syscall_timeout_ms;
- uint64 program_timeout_ms;
- uint64 slowdown_scale;
uint64 all_call_signal;
bool all_extra_signal;
};
@@ -681,28 +682,6 @@ void setup_control_pipes()
fail("dup2(2, 0) failed");
}
-void parse_env_flags(rpc::ExecEnv flags)
-{
- // Note: Values correspond to ordering in pkg/ipc/ipc.go, e.g. FlagSandboxNamespace
- flag_debug = (bool)(flags & rpc::ExecEnv::Debug);
- flag_coverage = (bool)(flags & rpc::ExecEnv::Signal);
- flag_sandbox_none = (bool)(flags & rpc::ExecEnv::SandboxNone);
- flag_sandbox_setuid = (bool)(flags & rpc::ExecEnv::SandboxSetuid);
- flag_sandbox_namespace = (bool)(flags & rpc::ExecEnv::SandboxNamespace);
- flag_sandbox_android = (bool)(flags & rpc::ExecEnv::SandboxAndroid);
- flag_extra_coverage = (bool)(flags & rpc::ExecEnv::ExtraCover);
- flag_net_injection = (bool)(flags & rpc::ExecEnv::EnableTun);
- flag_net_devices = (bool)(flags & rpc::ExecEnv::EnableNetDev);
- flag_net_reset = (bool)(flags & rpc::ExecEnv::EnableNetReset);
- flag_cgroups = (bool)(flags & rpc::ExecEnv::EnableCgroups);
- flag_close_fds = (bool)(flags & rpc::ExecEnv::EnableCloseFds);
- flag_devlink_pci = (bool)(flags & rpc::ExecEnv::EnableDevlinkPCI);
- flag_vhci_injection = (bool)(flags & rpc::ExecEnv::EnableVhciInjection);
- flag_wifi = (bool)(flags & rpc::ExecEnv::EnableWifi);
- flag_delay_kcov_mmap = (bool)(flags & rpc::ExecEnv::DelayKcovMmap);
- flag_nic_vf = (bool)(flags & rpc::ExecEnv::EnableNicVF);
-}
-
void receive_handshake()
{
handshake_req req = {};
@@ -714,27 +693,40 @@ void receive_handshake()
#if SYZ_HAVE_SANDBOX_ANDROID
sandbox_arg = req.sandbox_arg;
#endif
- parse_env_flags(req.flags);
+ is_kernel_64_bit = req.is_kernel_64_bit;
+ use_cover_edges = req.use_cover_edges;
procid = req.pid;
+ syscall_timeout_ms = req.syscall_timeout_ms;
+ program_timeout_ms = req.program_timeout_ms;
+ slowdown_scale = req.slowdown_scale;
+ flag_debug = (bool)(req.flags & rpc::ExecEnv::Debug);
+ flag_coverage = (bool)(req.flags & rpc::ExecEnv::Signal);
+ flag_sandbox_none = (bool)(req.flags & rpc::ExecEnv::SandboxNone);
+ flag_sandbox_setuid = (bool)(req.flags & rpc::ExecEnv::SandboxSetuid);
+ flag_sandbox_namespace = (bool)(req.flags & rpc::ExecEnv::SandboxNamespace);
+ flag_sandbox_android = (bool)(req.flags & rpc::ExecEnv::SandboxAndroid);
+ flag_extra_coverage = (bool)(req.flags & rpc::ExecEnv::ExtraCover);
+ flag_net_injection = (bool)(req.flags & rpc::ExecEnv::EnableTun);
+ flag_net_devices = (bool)(req.flags & rpc::ExecEnv::EnableNetDev);
+ flag_net_reset = (bool)(req.flags & rpc::ExecEnv::EnableNetReset);
+ flag_cgroups = (bool)(req.flags & rpc::ExecEnv::EnableCgroups);
+ flag_close_fds = (bool)(req.flags & rpc::ExecEnv::EnableCloseFds);
+ flag_devlink_pci = (bool)(req.flags & rpc::ExecEnv::EnableDevlinkPCI);
+ flag_vhci_injection = (bool)(req.flags & rpc::ExecEnv::EnableVhciInjection);
+ flag_wifi = (bool)(req.flags & rpc::ExecEnv::EnableWifi);
+ flag_delay_kcov_mmap = (bool)(req.flags & rpc::ExecEnv::DelayKcovMmap);
+ flag_nic_vf = (bool)(req.flags & rpc::ExecEnv::EnableNicVF);
}
-static execute_req last_execute_req;
-
void receive_execute()
{
- execute_req& req = last_execute_req;
+ execute_req req = {};
ssize_t n = read(kInPipeFd, &req, sizeof(req));
if (n != (ssize_t)sizeof(req))
failmsg("control pipe read failed", "read=%zd want=%zd", n, sizeof(req));
if (req.magic != kInMagic)
failmsg("bad execute request magic", "magic=0x%llx", req.magic);
request_id = req.id;
- parse_env_flags(req.env_flags);
- procid = req.pid;
- request_id = req.id;
- syscall_timeout_ms = req.syscall_timeout_ms;
- program_timeout_ms = req.program_timeout_ms;
- slowdown_scale = req.slowdown_scale;
flag_collect_signal = req.exec_flags & (1 << 0);
flag_collect_cover = req.exec_flags & (1 << 1);
flag_dedup_cover = req.exec_flags & (1 << 2);
@@ -744,10 +736,11 @@ void receive_execute()
all_extra_signal = req.all_extra_signal;
debug("[%llums] exec opts: procid=%llu threaded=%d cover=%d comps=%d dedup=%d signal=%d "
- " sandbox=%d/%d/%d/%d timeouts=%llu/%llu/%llu\n",
+ " sandbox=%d/%d/%d/%d timeouts=%llu/%llu/%llu kernel_64_bit=%d\n",
current_time_ms() - start_time_ms, procid, flag_threaded, flag_collect_cover,
flag_comparisons, flag_dedup_cover, flag_collect_signal, flag_sandbox_none, flag_sandbox_setuid,
- flag_sandbox_namespace, flag_sandbox_android, syscall_timeout_ms, program_timeout_ms, slowdown_scale);
+ flag_sandbox_namespace, flag_sandbox_android, syscall_timeout_ms, program_timeout_ms, slowdown_scale,
+ is_kernel_64_bit);
if (syscall_timeout_ms == 0 || program_timeout_ms <= syscall_timeout_ms || slowdown_scale == 0)
failmsg("bad timeouts", "syscall=%llu, program=%llu, scale=%llu",
syscall_timeout_ms, program_timeout_ms, slowdown_scale);
@@ -1055,10 +1048,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov, bool all)
bool prev_filter = true;
for (uint32 i = 0; i < cov->size; i++) {
cover_data_t pc = cover_data[i] + cov->pc_offset;
- if (is_kernel_pc(pc) < 0)
- exitf("got bad pc: 0x%llx", (uint64)pc);
uint64 sig = pc;
- if (use_cover_edges(pc)) {
+ if (use_cover_edges) {
// Only hash the lower 12 bits so the hash is independent of any module offsets.
const uint64 mask = (1 << 12) - 1;
sig ^= hash(prev_pc & mask) & mask;
@@ -1630,12 +1621,6 @@ std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp)
return {};
if (arg2 >= out_start && arg2 <= out_end)
return {};
- // Filter out kernel physical memory addresses.
- // These are internal kernel comparisons and should not be interesting.
- bool kptr1 = is_kernel_data(arg1) || is_kernel_pc(arg1) > 0 || arg1 == 0;
- bool kptr2 = is_kernel_data(arg2) || is_kernel_pc(arg2) > 0 || arg2 == 0;
- if (kptr1 && kptr2)
- return {};
if (!coverage_filter(cmp.pc))
return {};
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index 3ee4be80c..e8530f614 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -179,21 +179,6 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint64*)cov->data;
}
-static bool is_kernel_data(uint64 addr)
-{
- return false;
-}
-
-static int is_kernel_pc(uint64 pc)
-{
- return 0;
-}
-
-static bool use_cover_edges(uint64 pc)
-{
- return true;
-}
-
#if GOOS_netbsd
#define SYZ_HAVE_FEATURES 1
static feature_t features[] = {
diff --git a/executor/executor_darwin.h b/executor/executor_darwin.h
index 11146acc3..76b939fcf 100644
--- a/executor/executor_darwin.h
+++ b/executor/executor_darwin.h
@@ -121,18 +121,3 @@ static void cover_collect(cover_t* cov)
cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
cov->pc_offset = trace->offset;
}
-
-static bool is_kernel_data(uint64 addr)
-{
- return false;
-}
-
-static int is_kernel_pc(uint64 pc)
-{
- return 0;
-}
-
-static bool use_cover_edges(uint64 pc)
-{
- return true;
-}
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index cb980838f..ff8f3bc60 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -36,8 +36,6 @@ struct kcov_remote_arg {
#define KCOV_SUBSYSTEM_MASK (0xffull << 56)
#define KCOV_INSTANCE_MASK (0xffffffffull)
-static bool is_gvisor;
-
static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
{
if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
@@ -45,14 +43,9 @@ static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
return subsys | inst;
}
-static bool detect_kernel_bitness();
-static bool detect_gvisor();
-
static void os_init(int argc, char** argv, char* data, size_t data_size)
{
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
- is_kernel_64_bit = detect_kernel_bitness();
- is_gvisor = detect_gvisor();
// Surround the main data mapping with PROT_NONE pages to make virtual address layout more consistent
// across different configurations (static/non-static build) and C repros.
// One observed case before: executor had a mapping above the data mapping (output region),
@@ -177,77 +170,6 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint32*)cov->data;
}
-static bool use_cover_edges(uint32 pc)
-{
- return true;
-}
-
-static bool is_kernel_data(uint64 addr)
-{
- if (is_gvisor)
- return false;
-#if GOARCH_386 || GOARCH_amd64
- // This range corresponds to the first 1TB of the physical memory mapping,
- // see Documentation/arch/x86/x86_64/mm.rst.
- return addr >= 0xffff880000000000ull && addr < 0xffff890000000000ull;
-#else
- return false;
-#endif
-}
-
-// Returns >0 for yes, <0 for no, 0 for don't know.
-static int is_kernel_pc(uint64 pc)
-{
- if (is_gvisor)
- return 0;
-#if GOARCH_386 || GOARCH_amd64
- // Text/modules range for x86_64.
- return pc >= 0xffffffff80000000ull && pc < 0xffffffffff000000ull ? 1 : -1;
-#else
- return 0;
-#endif
-}
-
-static bool use_cover_edges(uint64 pc)
-{
-#if GOARCH_amd64 || GOARCH_arm64
- if (is_gvisor)
- return false; // gvisor coverage is not a trace, so producing edges won't work
-#endif
- return true;
-}
-
-static bool detect_kernel_bitness()
-{
- if (sizeof(void*) == 8)
- return true;
- // It turns out to be surprisingly hard to understand if the kernel underneath is 64-bits.
- // A common method is to look at uname.machine. But it is produced in some involved ways,
- // and we will need to know about all strings it returns and in the end it can be overriden
- // during build and lie (and there are known precedents of this).
- // So instead we look at size of addresses in /proc/kallsyms.
- bool wide = true;
- int fd = open("/proc/kallsyms", O_RDONLY);
- if (fd != -1) {
- char buf[16];
- if (read(fd, buf, sizeof(buf)) == sizeof(buf) &&
- (buf[8] == ' ' || buf[8] == '\t'))
- wide = false;
- close(fd);
- }
- debug("detected %d-bit kernel\n", wide ? 64 : 32);
- return wide;
-}
-
-static bool detect_gvisor()
-{
- char buf[64] = {};
- // 3 stands for undeclared SYSLOG_ACTION_READ_ALL.
- syscall(__NR_syslog, 3, buf, sizeof(buf) - 1);
- // This is a first line of gvisor dmesg.
- return strstr(buf, "Starting gVisor");
-}
-
// One does not simply exit.
// _exit can in fact fail.
// syzkaller did manage to generate a seccomp filter that prohibits exit_group syscall.
diff --git a/executor/executor_runner.h b/executor/executor_runner.h
index 100ed87f6..86ce1819a 100644
--- a/executor/executor_runner.h
+++ b/executor/executor_runner.h
@@ -33,12 +33,14 @@ class Proc
{
public:
Proc(Connection& conn, const char* bin, int id, int max_signal_fd, int cover_filter_fd,
- uint32 slowdown, uint32 syscall_timeout_ms, uint32 program_timeout_ms)
+ bool use_cover_edges, bool is_kernel_64_bit, uint32 slowdown, uint32 syscall_timeout_ms, uint32 program_timeout_ms)
: conn_(conn),
bin_(bin),
id_(id),
max_signal_fd_(max_signal_fd),
cover_filter_fd_(cover_filter_fd),
+ use_cover_edges_(use_cover_edges),
+ is_kernel_64_bit_(is_kernel_64_bit),
slowdown_(slowdown),
syscall_timeout_ms_(syscall_timeout_ms),
program_timeout_ms_(program_timeout_ms),
@@ -129,6 +131,8 @@ private:
const int id_;
const int max_signal_fd_;
const int cover_filter_fd_;
+ const bool use_cover_edges_;
+ const bool is_kernel_64_bit_;
const uint32 slowdown_;
const uint32 syscall_timeout_ms_;
const uint32 program_timeout_ms_;
@@ -265,9 +269,14 @@ private:
sandbox_arg_ = msg_->exec_opts->sandbox_arg();
handshake_req req = {
.magic = kInMagic,
+ .use_cover_edges = use_cover_edges_,
+ .is_kernel_64_bit = is_kernel_64_bit_,
.flags = exec_env_,
.pid = static_cast<uint64>(id_),
.sandbox_arg = static_cast<uint64>(sandbox_arg_),
+ .syscall_timeout_ms = syscall_timeout_ms_,
+ .program_timeout_ms = program_timeout_ms_,
+ .slowdown_scale = slowdown_,
};
if (write(req_pipe_, &req, sizeof(req)) != sizeof(req)) {
debug("request pipe write failed (errno=%d)\n", errno);
@@ -312,12 +321,7 @@ private:
execute_req req{
.magic = kInMagic,
.id = static_cast<uint64>(msg_->id),
- .env_flags = exec_env_,
.exec_flags = static_cast<uint64>(msg_->exec_opts->exec_flags()),
- .pid = static_cast<uint64>(id_),
- .syscall_timeout_ms = syscall_timeout_ms_,
- .program_timeout_ms = program_timeout_ms_,
- .slowdown_scale = slowdown_,
.all_call_signal = all_call_signal,
.all_extra_signal = all_extra_signal,
};
@@ -461,7 +465,7 @@ public:
int cover_filter_fd = cover_filter_ ? cover_filter_->FD() : -1;
for (size_t i = 0; i < num_procs; i++)
procs_.emplace_back(new Proc(conn, bin, i, max_signal_fd, cover_filter_fd,
- slowdown_, syscall_timeout_ms_, program_timeout_ms_));
+ use_cover_edges_, is_kernel_64_bit_, slowdown_, syscall_timeout_ms_, program_timeout_ms_));
for (;;)
Loop();
@@ -475,6 +479,8 @@ private:
std::vector<std::unique_ptr<Proc>> procs_;
std::deque<rpc::ExecRequestRawT> requests_;
std::vector<std::string> leak_frames_;
+ bool use_cover_edges_ = false;
+ bool is_kernel_64_bit_ = false;
uint32 slowdown_ = 0;
uint32 syscall_timeout_ms_ = 0;
uint32 program_timeout_ms_ = 0;
@@ -538,11 +544,14 @@ private:
conn_.Recv(conn_reply);
if (conn_reply.debug)
flag_debug = true;
- debug("connected to manager: procs=%d slowdown=%d syscall_timeout=%u"
+ debug("connected to manager: procs=%d cover_edges=%d kernel_64_bit=%d slowdown=%d syscall_timeout=%u"
" program_timeout=%u features=0x%llx\n",
- conn_reply.procs, conn_reply.slowdown, conn_reply.syscall_timeout_ms,
+ conn_reply.procs, conn_reply.cover_edges, conn_reply.kernel_64_bit,
+ conn_reply.slowdown, conn_reply.syscall_timeout_ms,
conn_reply.program_timeout_ms, static_cast<uint64>(conn_reply.features));
leak_frames_ = conn_reply.leak_frames;
+ use_cover_edges_ = conn_reply.cover_edges;
+ is_kernel_64_bit_ = is_kernel_64_bit = conn_reply.kernel_64_bit;
slowdown_ = conn_reply.slowdown;
syscall_timeout_ms_ = conn_reply.syscall_timeout_ms;
program_timeout_ms_ = conn_reply.program_timeout_ms;
@@ -555,7 +564,6 @@ private:
// This does any one-time setup for the requested features on the machine.
// Note: this can be called multiple times and must be idempotent.
- // is_kernel_64_bit = detect_kernel_bitness();
#if SYZ_HAVE_FEATURES
setup_sysctl();
setup_cgroups();
diff --git a/executor/executor_test.h b/executor/executor_test.h
index c2802dd2a..5e128d851 100644
--- a/executor/executor_test.h
+++ b/executor/executor_test.h
@@ -9,6 +9,7 @@
#include <sys/prctl.h>
#endif
+// sys/targets also knows about these consts.
static uint64 kernel_text_start = 0xc0dec0dec0000000;
static uint64 kernel_text_mask = 0xffffff;
@@ -35,17 +36,30 @@ static void os_init(int argc, char** argv, void* data, size_t data_size)
extern "C" notrace void __sanitizer_cov_trace_pc(void)
{
- unsigned long ip = (unsigned long)__builtin_return_address(0);
- // Convert to what is_kernel_pc will accept as valid coverage;
- ip = kernel_text_start | (ip & kernel_text_mask);
if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
return;
- unsigned long* start = (unsigned long*)current_thread->cov.data;
- unsigned long* end = (unsigned long*)current_thread->cov.data_end;
- int pos = start[0];
- if (start + pos + 1 < end) {
- start[0] = pos + 1;
- start[pos + 1] = ip;
+ uint64 pc = (uint64)__builtin_return_address(0);
+ // Map the PC into the synthetic kernel text range (kernel_text_start/kernel_text_mask).
+ pc = kernel_text_start | (pc & kernel_text_mask);
+ // Note: we duplicate the following code instead of using a template function
+ // because it must not be instrumented, which is hard to achieve for all compilers
+ // if the code is in a separate function.
+ if (is_kernel_64_bit) {
+ uint64* start = (uint64*)current_thread->cov.data;
+ uint64* end = (uint64*)current_thread->cov.data_end;
+ uint64 pos = start[0];
+ if (start + pos + 1 < end) {
+ start[0] = pos + 1;
+ start[pos + 1] = pc;
+ }
+ } else {
+ uint32* start = (uint32*)current_thread->cov.data;
+ uint32* end = (uint32*)current_thread->cov.data_end;
+ uint32 pos = start[0];
+ if (start + pos + 1 < end) {
+ start[0] = pos + 1;
+ start[pos + 1] = pc;
+ }
}
}
@@ -97,7 +111,7 @@ static void cover_mmap(cover_t* cov)
if (cov->data == MAP_FAILED)
exitf("cover mmap failed");
cov->data_end = cov->data + cov->mmap_alloc_size;
- cov->data_offset = sizeof(unsigned long);
+ cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
// We don't care about the specific PC values for now.
// Once we do, we might want to consider ASLR here.
cov->pc_offset = 0;
@@ -107,36 +121,13 @@ static void cover_unprotect(cover_t* cov)
{
}
-static bool is_kernel_data(uint64 addr)
-{
- return addr >= 0xda1a0000 && addr <= 0xda1a1000;
-}
-
-static int is_kernel_pc(uint64 pc)
-{
- uint64 start = kernel_text_start;
- uint64 end = kernel_text_start | kernel_text_mask;
- if (!is_kernel_64_bit) {
- start = (uint32)start;
- end = (uint32)end;
- }
- return pc >= start && pc <= end ? 1 : -1;
-}
-
-static bool use_cover_edges(uint64 pc)
-{
- return true;
-}
-
-static long syz_inject_cover(volatile long a, volatile long b, volatile long c)
+static long syz_inject_cover(volatile long a, volatile long b)
{
cover_t* cov = &current_thread->cov;
if (cov->data == nullptr)
return ENOENT;
- is_kernel_64_bit = a;
- cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
- uint32 size = std::min((uint32)c, cov->mmap_alloc_size);
- memcpy(cov->data, (void*)b, size);
+ uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
+ memcpy(cov->data, (void*)a, size);
memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
return 0;
}
diff --git a/executor/nocover.h b/executor/nocover.h
index b097e9f43..10e256cdd 100644
--- a/executor/nocover.h
+++ b/executor/nocover.h
@@ -28,18 +28,3 @@ static void cover_mmap(cover_t* cov)
static void cover_unprotect(cover_t* cov)
{
}
-
-static bool is_kernel_data(uint64 addr)
-{
- return false;
-}
-
-static int is_kernel_pc(uint64 pc)
-{
- return 0;
-}
-
-static bool use_cover_edges(uint64 pc)
-{
- return true;
-}
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 58a6b3250..f0b03c4a9 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -37,6 +37,8 @@ table ConnectRequestRaw {
table ConnectReplyRaw {
debug :bool;
cover :bool;
+ cover_edges :bool;
+ kernel_64_bit :bool;
procs :int32;
slowdown :int32;
syscall_timeout_ms :int32;
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index 0e46dddbd..87f6ad19d 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -509,6 +509,8 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
type ConnectReplyRawT struct {
Debug bool `json:"debug"`
Cover bool `json:"cover"`
+ CoverEdges bool `json:"cover_edges"`
+ Kernel64Bit bool `json:"kernel_64_bit"`
Procs int32 `json:"procs"`
Slowdown int32 `json:"slowdown"`
SyscallTimeoutMs int32 `json:"syscall_timeout_ms"`
@@ -579,6 +581,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
ConnectReplyRawStart(builder)
ConnectReplyRawAddDebug(builder, t.Debug)
ConnectReplyRawAddCover(builder, t.Cover)
+ ConnectReplyRawAddCoverEdges(builder, t.CoverEdges)
+ ConnectReplyRawAddKernel64Bit(builder, t.Kernel64Bit)
ConnectReplyRawAddProcs(builder, t.Procs)
ConnectReplyRawAddSlowdown(builder, t.Slowdown)
ConnectReplyRawAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs)
@@ -594,6 +598,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) {
t.Debug = rcv.Debug()
t.Cover = rcv.Cover()
+ t.CoverEdges = rcv.CoverEdges()
+ t.Kernel64Bit = rcv.Kernel64Bit()
t.Procs = rcv.Procs()
t.Slowdown = rcv.Slowdown()
t.SyscallTimeoutMs = rcv.SyscallTimeoutMs()
@@ -681,20 +687,44 @@ func (rcv *ConnectReplyRaw) MutateCover(n bool) bool {
return rcv._tab.MutateBoolSlot(6, n)
}
-func (rcv *ConnectReplyRaw) Procs() int32 {
+func (rcv *ConnectReplyRaw) CoverEdges() bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateCoverEdges(n bool) bool {
+ return rcv._tab.MutateBoolSlot(8, n)
+}
+
+func (rcv *ConnectReplyRaw) Kernel64Bit() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateKernel64Bit(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func (rcv *ConnectReplyRaw) Procs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
return 0
}
func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool {
- return rcv._tab.MutateInt32Slot(8, n)
+ return rcv._tab.MutateInt32Slot(12, n)
}
func (rcv *ConnectReplyRaw) Slowdown() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -702,11 +732,11 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 {
}
func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool {
- return rcv._tab.MutateInt32Slot(10, n)
+ return rcv._tab.MutateInt32Slot(14, n)
}
func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -714,11 +744,11 @@ func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
}
func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool {
- return rcv._tab.MutateInt32Slot(12, n)
+ return rcv._tab.MutateInt32Slot(16, n)
}
func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -726,11 +756,11 @@ func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
}
func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool {
- return rcv._tab.MutateInt32Slot(14, n)
+ return rcv._tab.MutateInt32Slot(18, n)
}
func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -739,7 +769,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) LeakFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -747,7 +777,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int {
}
func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -756,7 +786,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) RaceFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -764,7 +794,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int {
}
func (rcv *ConnectReplyRaw) Features() Feature {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos))
}
@@ -772,11 +802,11 @@ func (rcv *ConnectReplyRaw) Features() Feature {
}
func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool {
- return rcv._tab.MutateUint64Slot(20, uint64(n))
+ return rcv._tab.MutateUint64Slot(24, uint64(n))
}
func (rcv *ConnectReplyRaw) Files(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -785,7 +815,7 @@ func (rcv *ConnectReplyRaw) Files(j int) []byte {
}
func (rcv *ConnectReplyRaw) FilesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -793,7 +823,7 @@ func (rcv *ConnectReplyRaw) FilesLength() int {
}
func (rcv *ConnectReplyRaw) Globs(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -802,7 +832,7 @@ func (rcv *ConnectReplyRaw) Globs(j int) []byte {
}
func (rcv *ConnectReplyRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -810,7 +840,7 @@ func (rcv *ConnectReplyRaw) GlobsLength() int {
}
func ConnectReplyRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(11)
+ builder.StartObject(13)
}
func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
builder.PrependBoolSlot(0, debug, false)
@@ -818,41 +848,47 @@ func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) {
builder.PrependBoolSlot(1, cover, false)
}
+func ConnectReplyRawAddCoverEdges(builder *flatbuffers.Builder, coverEdges bool) {
+ builder.PrependBoolSlot(2, coverEdges, false)
+}
+func ConnectReplyRawAddKernel64Bit(builder *flatbuffers.Builder, kernel64Bit bool) {
+ builder.PrependBoolSlot(3, kernel64Bit, false)
+}
func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) {
- builder.PrependInt32Slot(2, procs, 0)
+ builder.PrependInt32Slot(4, procs, 0)
}
func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) {
- builder.PrependInt32Slot(3, slowdown, 0)
+ builder.PrependInt32Slot(5, slowdown, 0)
}
func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) {
- builder.PrependInt32Slot(4, syscallTimeoutMs, 0)
+ builder.PrependInt32Slot(6, syscallTimeoutMs, 0)
}
func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) {
- builder.PrependInt32Slot(5, programTimeoutMs, 0)
+ builder.PrependInt32Slot(7, programTimeoutMs, 0)
}
func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(leakFrames), 0)
+ builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(leakFrames), 0)
}
func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(raceFrames), 0)
+ builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(raceFrames), 0)
}
func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) {
- builder.PrependUint64Slot(8, uint64(features), 0)
+ builder.PrependUint64Slot(10, uint64(features), 0)
}
func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(files), 0)
+ builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(files), 0)
}
func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(globs), 0)
+ builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(globs), 0)
}
func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index e3c2de7af..a2704f9e9 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -807,6 +807,8 @@ struct ConnectReplyRawT : public flatbuffers::NativeTable {
typedef ConnectReplyRaw TableType;
bool debug = false;
bool cover = false;
+ bool cover_edges = false;
+ bool kernel_64_bit = false;
int32_t procs = 0;
int32_t slowdown = 0;
int32_t syscall_timeout_ms = 0;
@@ -824,15 +826,17 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_DEBUG = 4,
VT_COVER = 6,
- VT_PROCS = 8,
- VT_SLOWDOWN = 10,
- VT_SYSCALL_TIMEOUT_MS = 12,
- VT_PROGRAM_TIMEOUT_MS = 14,
- VT_LEAK_FRAMES = 16,
- VT_RACE_FRAMES = 18,
- VT_FEATURES = 20,
- VT_FILES = 22,
- VT_GLOBS = 24
+ VT_COVER_EDGES = 8,
+ VT_KERNEL_64_BIT = 10,
+ VT_PROCS = 12,
+ VT_SLOWDOWN = 14,
+ VT_SYSCALL_TIMEOUT_MS = 16,
+ VT_PROGRAM_TIMEOUT_MS = 18,
+ VT_LEAK_FRAMES = 20,
+ VT_RACE_FRAMES = 22,
+ VT_FEATURES = 24,
+ VT_FILES = 26,
+ VT_GLOBS = 28
};
bool debug() const {
return GetField<uint8_t>(VT_DEBUG, 0) != 0;
@@ -840,6 +844,12 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool cover() const {
return GetField<uint8_t>(VT_COVER, 0) != 0;
}
+ bool cover_edges() const {
+ return GetField<uint8_t>(VT_COVER_EDGES, 0) != 0;
+ }
+ bool kernel_64_bit() const {
+ return GetField<uint8_t>(VT_KERNEL_64_BIT, 0) != 0;
+ }
int32_t procs() const {
return GetField<int32_t>(VT_PROCS, 0);
}
@@ -871,6 +881,8 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_DEBUG, 1) &&
VerifyField<uint8_t>(verifier, VT_COVER, 1) &&
+ VerifyField<uint8_t>(verifier, VT_COVER_EDGES, 1) &&
+ VerifyField<uint8_t>(verifier, VT_KERNEL_64_BIT, 1) &&
VerifyField<int32_t>(verifier, VT_PROCS, 4) &&
VerifyField<int32_t>(verifier, VT_SLOWDOWN, 4) &&
VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) &&
@@ -905,6 +917,12 @@ struct ConnectReplyRawBuilder {
void add_cover(bool cover) {
fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER, static_cast<uint8_t>(cover), 0);
}
+ void add_cover_edges(bool cover_edges) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER_EDGES, static_cast<uint8_t>(cover_edges), 0);
+ }
+ void add_kernel_64_bit(bool kernel_64_bit) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_KERNEL_64_BIT, static_cast<uint8_t>(kernel_64_bit), 0);
+ }
void add_procs(int32_t procs) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROCS, procs, 0);
}
@@ -947,6 +965,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
bool cover = false,
+ bool cover_edges = false,
+ bool kernel_64_bit = false,
int32_t procs = 0,
int32_t slowdown = 0,
int32_t syscall_timeout_ms = 0,
@@ -966,6 +986,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
builder_.add_syscall_timeout_ms(syscall_timeout_ms);
builder_.add_slowdown(slowdown);
builder_.add_procs(procs);
+ builder_.add_kernel_64_bit(kernel_64_bit);
+ builder_.add_cover_edges(cover_edges);
builder_.add_cover(cover);
builder_.add_debug(debug);
return builder_.Finish();
@@ -975,6 +997,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
bool cover = false,
+ bool cover_edges = false,
+ bool kernel_64_bit = false,
int32_t procs = 0,
int32_t slowdown = 0,
int32_t syscall_timeout_ms = 0,
@@ -992,6 +1016,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
_fbb,
debug,
cover,
+ cover_edges,
+ kernel_64_bit,
procs,
slowdown,
syscall_timeout_ms,
@@ -2446,6 +2472,8 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r
(void)_resolver;
{ auto _e = debug(); _o->debug = _e; }
{ auto _e = cover(); _o->cover = _e; }
+ { auto _e = cover_edges(); _o->cover_edges = _e; }
+ { auto _e = kernel_64_bit(); _o->kernel_64_bit = _e; }
{ auto _e = procs(); _o->procs = _e; }
{ auto _e = slowdown(); _o->slowdown = _e; }
{ auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; }
@@ -2467,6 +2495,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _debug = _o->debug;
auto _cover = _o->cover;
+ auto _cover_edges = _o->cover_edges;
+ auto _kernel_64_bit = _o->kernel_64_bit;
auto _procs = _o->procs;
auto _slowdown = _o->slowdown;
auto _syscall_timeout_ms = _o->syscall_timeout_ms;
@@ -2480,6 +2510,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
_fbb,
_debug,
_cover,
+ _cover_edges,
+ _kernel_64_bit,
_procs,
_slowdown,
_syscall_timeout_ms,
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
index bd58ca4ad..1831259f2 100644
--- a/pkg/rpcserver/local.go
+++ b/pkg/rpcserver/local.go
@@ -38,6 +38,11 @@ type LocalConfig struct {
}
func RunLocal(cfg *LocalConfig) error {
+ if cfg.VMArch == "" {
+ cfg.VMArch = cfg.Target.Arch
+ }
+ cfg.UseCoverEdges = true
+ cfg.FilterSignal = true
cfg.RPC = ":0"
cfg.VMLess = true
cfg.PrintMachineCheck = log.V(1)
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index acf31e868..cdeb6f40b 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -30,8 +30,14 @@ import (
type Config struct {
vminfo.Config
- RPC string
- VMLess bool
+ VMArch string
+ RPC string
+ VMLess bool
+ // Hash adjacent PCs to form fuzzing feedback signal (otherwise just use coverage PCs as signal).
+ UseCoverEdges bool
+ // Filter signal/comparisons against target kernel text/data ranges.
+ // Disabled for gVisor/Starnix which are not Linux.
+ FilterSignal bool
PrintMachineCheck bool
Procs int
Slowdown int
@@ -49,12 +55,13 @@ type Server struct {
StatExecs *stats.Val
StatNumFuzzing *stats.Val
- cfg *Config
- mgr Manager
- serv *flatrpc.Serv
- target *prog.Target
- timeouts targets.Timeouts
- checker *vminfo.Checker
+ cfg *Config
+ mgr Manager
+ serv *flatrpc.Serv
+ target *prog.Target
+ sysTarget *targets.Target
+ timeouts targets.Timeouts
+ checker *vminfo.Checker
infoOnce sync.Once
checkDone atomic.Bool
@@ -88,8 +95,13 @@ func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) {
Sandbox: sandbox,
SandboxArg: cfg.SandboxArg,
},
- RPC: cfg.RPC,
- VMLess: cfg.VMLess,
+ VMArch: cfg.TargetVMArch,
+ RPC: cfg.RPC,
+ VMLess: cfg.VMLess,
+ // gVisor coverage is not a trace, so producing edges won't work.
+ UseCoverEdges: cfg.Type != targets.GVisor,
+ // gVisor/Starnix are not Linux, so filtering against Linux ranges won't work.
+ FilterSignal: cfg.Type != targets.GVisor && cfg.Type != targets.Starnix,
PrintMachineCheck: true,
Procs: cfg.Procs,
Slowdown: cfg.Timeouts.Slowdown,
@@ -100,11 +112,14 @@ func newImpl(cfg *Config, mgr Manager) (*Server, error) {
cfg.Procs = min(cfg.Procs, prog.MaxPids)
checker := vminfo.New(&cfg.Config)
baseSource := queue.DynamicSource(checker)
+ // Note that we use VMArch, rather than Arch. We need the kernel address ranges and bitness.
+ sysTarget := targets.Get(cfg.Target.OS, cfg.VMArch)
serv := &Server{
cfg: cfg,
mgr: mgr,
target: cfg.Target,
- timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown),
+ sysTarget: sysTarget,
+ timeouts: sysTarget.Timeouts(cfg.Slowdown),
runners: make(map[string]*Runner),
info: make(map[string]VMState),
checker: checker,
@@ -245,6 +260,8 @@ func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Canoni
connectReply := &flatrpc.ConnectReply{
Debug: serv.cfg.Debug,
Cover: serv.cfg.Cover,
+ CoverEdges: serv.cfg.UseCoverEdges,
+ Kernel64Bit: serv.sysTarget.PtrSize == 8,
Procs: int32(serv.cfg.Procs),
Slowdown: int32(serv.timeouts.Slowdown),
SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond),
@@ -421,18 +438,20 @@ func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enable
func (serv *Server) CreateInstance(name string, injectExec chan<- bool) {
runner := &Runner{
- source: serv.execSource,
- cover: serv.cfg.Cover,
- debug: serv.cfg.Debug,
- injectExec: injectExec,
- infoc: make(chan chan []byte),
- finished: make(chan bool),
- requests: make(map[int64]*queue.Request),
- executing: make(map[int64]bool),
- lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
- rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
- stats: serv.runnerStats,
- procs: serv.cfg.Procs,
+ source: serv.execSource,
+ cover: serv.cfg.Cover,
+ filterSignal: serv.cfg.FilterSignal,
+ debug: serv.cfg.Debug,
+ sysTarget: serv.sysTarget,
+ injectExec: injectExec,
+ infoc: make(chan chan []byte),
+ finished: make(chan bool),
+ requests: make(map[int64]*queue.Request),
+ executing: make(map[int64]bool),
+ lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
+ rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
+ stats: serv.runnerStats,
+ procs: serv.cfg.Procs,
}
serv.mu.Lock()
if serv.runners[name] != nil {
diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go
index 0c41346ee..b5903848f 100644
--- a/pkg/rpcserver/runner.go
+++ b/pkg/rpcserver/runner.go
@@ -18,13 +18,16 @@ import (
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/stats"
"github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
)
type Runner struct {
source queue.Source
procs int
cover bool
+ filterSignal bool
debug bool
+ sysTarget *targets.Target
stats *runnerStats
stopped bool
finished chan bool
@@ -235,10 +238,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
// Coverage collection is disabled, but signal was requested => use a substitute signal.
addFallbackSignal(req.Prog, msg.Info)
}
- for i := 0; i < len(msg.Info.Calls); i++ {
- call := msg.Info.Calls[i]
- call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
- call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+ for _, call := range msg.Info.Calls {
+ runner.convertCallInfo(call)
}
if len(msg.Info.ExtraRaw) != 0 {
msg.Info.Extra = msg.Info.ExtraRaw[0]
@@ -248,9 +249,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...)
msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...)
}
- msg.Info.Extra.Cover = runner.canonicalizer.Canonicalize(msg.Info.Extra.Cover)
- msg.Info.Extra.Signal = runner.canonicalizer.Canonicalize(msg.Info.Extra.Signal)
msg.Info.ExtraRaw = nil
+ runner.convertCallInfo(msg.Info.Extra)
}
}
status := queue.Success
@@ -268,6 +268,49 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
return nil
}
+func (runner *Runner) convertCallInfo(call *flatrpc.CallInfo) {
+ call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
+ call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+
+	// Check that the signal belongs to kernel addresses.
+	// Mismatching addresses can mean either corrupted VM memory, or that the fuzzer somehow
+	// managed to inject output signal. If we see any bogus signal, drop the whole signal
+	// (we don't want programs that can inject bogus coverage to end up in the corpus).
+ var kernelAddresses targets.KernelAddresses
+ if runner.filterSignal {
+ kernelAddresses = runner.sysTarget.KernelAddresses
+ }
+ textStart, textEnd := kernelAddresses.TextStart, kernelAddresses.TextEnd
+ if textStart != 0 {
+ for _, sig := range call.Signal {
+ if sig < textStart || sig > textEnd {
+ call.Signal = []uint64{}
+ call.Cover = []uint64{}
+ break
+ }
+ }
+ }
+
+ // Filter out kernel physical memory addresses.
+ // These are internal kernel comparisons and should not be interesting.
+ dataStart, dataEnd := kernelAddresses.DataStart, kernelAddresses.DataEnd
+ if len(call.Comps) != 0 && (textStart != 0 || dataStart != 0) {
+ if runner.sysTarget.PtrSize == 4 {
+ // These will appear sign-extended in comparison operands.
+ textStart = uint64(int64(int32(textStart)))
+ textEnd = uint64(int64(int32(textEnd)))
+ dataStart = uint64(int64(int32(dataStart)))
+ dataEnd = uint64(int64(int32(dataEnd)))
+ }
+ isKptr := func(val uint64) bool {
+ return val >= textStart && val <= textEnd || val >= dataStart && val <= dataEnd || val == 0
+ }
+ call.Comps = slices.DeleteFunc(call.Comps, func(cmp *flatrpc.Comparison) bool {
+ return isKptr(cmp.Op1) && isKptr(cmp.Op2)
+ })
+ }
+}
+
func (runner *Runner) sendSignalUpdate(plus, minus []uint64) error {
msg := &flatrpc.HostMessage{
Msg: &flatrpc.HostMessages{
diff --git a/pkg/runtest/executor_test.go b/pkg/runtest/executor_test.go
index 4bdadfd52..72889669d 100644
--- a/pkg/runtest/executor_test.go
+++ b/pkg/runtest/executor_test.go
@@ -61,7 +61,7 @@ func TestZlib(t *testing.T) {
}
executor := csource.BuildExecutor(t, target, "../..")
source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
+ startRPCServer(t, target, executor, "", source, nil, nil, nil)
r := rand.New(testutil.RandSource(t))
for i := 0; i < 10; i++ {
data := testutil.RandMountImage(r)
@@ -111,7 +111,7 @@ func TestExecutorCommonExt(t *testing.T) {
t.Fatal(err)
}
source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
+ startRPCServer(t, target, executor, "", source, nil, nil, nil)
req := &queue.Request{
Prog: p,
ReturnError: true,
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index 4c5cde642..f69173ef8 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -83,7 +83,7 @@ func test(t *testing.T, sysTarget *targets.Target) {
Verbose: true,
Debug: *flagDebug,
}
- startRpcserver(t, target, executor, ctx, nil, nil, func(features flatrpc.Feature) {
+ startRPCServer(t, target, executor, "", ctx, nil, nil, func(features flatrpc.Feature) {
// Features we expect to be enabled on the test OS.
// All sandboxes except for none are not implemented, coverage is not returned,
// and setup for few features is failing specifically to test feature detection.
@@ -138,7 +138,7 @@ func TestCover(t *testing.T) {
}
type CoverTest struct {
- Is64Bit int
+ Is64Bit bool
Input []byte
MaxSignal []uint64
CoverFilter []uint64
@@ -168,18 +168,18 @@ func testCover(t *testing.T, target *prog.Target) {
tests := []CoverTest{
// Empty coverage.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
// Single 64-bit PC.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0dec0dec0112233},
@@ -187,7 +187,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Single 32-bit PC.
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0112233),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0112233},
@@ -195,20 +195,20 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Ensure we don't sent cover/signal when not requested.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0dec0dec0112233},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectSignal,
Signal: []uint64{0xc0dec0dec0112233},
},
// Coverage deduplication.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover,
@@ -216,7 +216,7 @@ func testCover(t *testing.T, target *prog.Target) {
0xc0dec0dec0000011, 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
@@ -224,7 +224,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Signal hashing.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
Flags: flatrpc.ExecFlagCollectSignal,
@@ -233,18 +233,18 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Invalid non-kernel PCs must fail test execution.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000022, 0xc000000000000033),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0x33),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
// 64-bit comparisons.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeComps(
// A normal 8-byte comparison must be returned in the output as is.
Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
@@ -287,14 +287,14 @@ func testCover(t *testing.T, target *prog.Target) {
},
// 32-bit comparisons must be the same, so test only a subset.
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeComps(
Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
Comparison{CmpSize4 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
Comparison{CmpSize4 | CmpConst, 0xc0de0000, 0xc0de1000, 0},
- Comparison{CmpSize8 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
+ Comparison{CmpSize4 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
),
Flags: flatrpc.ExecFlagCollectComps,
Comps: [][2]uint64{
@@ -305,7 +305,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Test max signal.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000002,
0xc0dec0dec0000100, 0xc0dec0dec0001000),
MaxSignal: []uint64{0xc0dec0dec0000001, 0xc0dec0dec0000013, 0xc0dec0dec0000abc},
@@ -315,7 +315,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0dec0dec0001100, 0xc0dec0dec0000102},
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0000001, 0xc0000010, 0xc0000002, 0xc0000100, 0xc0001000),
MaxSignal: []uint64{0xc0000001, 0xc0000013, 0xc0000abc},
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
@@ -323,7 +323,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0001100, 0xc0000102},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000002,
0xc0dec0dec0000100, 0xc0dec0dec0001000),
MaxSignal: []uint64{0xc0dec0dec0000001, 0xc0dec0dec0000013, 0xc0dec0dec0000abc},
@@ -334,7 +334,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Test cover filter.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000020,
0xc0dec0dec0000040, 0xc0dec0dec0000100, 0xc0dec0dec0001000, 0xc0dec0dec0002000),
CoverFilter: []uint64{0xc0dec0dec0000002, 0xc0dec0dec0000100},
@@ -344,7 +344,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0dec0dec0001100, 0xc0dec0dec0000140, 0xc0dec0dec0000011, 0xc0dec0dec0000001},
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0000001, 0xc0000010, 0xc0000020, 0xc0000040,
0xc0000100, 0xc0001000, 0xc0002000),
CoverFilter: []uint64{0xc0000002, 0xc0000100},
@@ -355,24 +355,23 @@ func testCover(t *testing.T, target *prog.Target) {
},
}
executor := csource.BuildExecutor(t, target, "../../")
- source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
for i, test := range tests {
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Parallel()
- mysource := source
- if len(test.MaxSignal)+len(test.CoverFilter) != 0 {
- mysource = queue.Plain()
- startRpcserver(t, target, executor, mysource, test.MaxSignal, test.CoverFilter, nil)
+ source := queue.Plain()
+ vmArch := targets.TestArch32
+ if test.Is64Bit {
+ vmArch = targets.TestArch64
}
- testCover1(t, target, test, mysource)
+ startRPCServer(t, target, executor, vmArch, source, test.MaxSignal, test.CoverFilter, nil)
+ testCover1(t, target, test, source)
})
}
}
func testCover1(t *testing.T, target *prog.Target, test CoverTest, source *queue.PlainQueue) {
- text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
+ text := fmt.Sprintf(`syz_inject_cover(&AUTO="%s", AUTO)`, hex.EncodeToString(test.Input))
p, err := target.Deserialize([]byte(text), prog.Strict)
if err != nil {
t.Fatal(err)
@@ -436,7 +435,7 @@ func makeComps(comps ...Comparison) []byte {
return w.Bytes()
}
-func startRpcserver(t *testing.T, target *prog.Target, executor string, source queue.Source,
+func startRPCServer(t *testing.T, target *prog.Target, executor, vmArch string, source queue.Source,
maxSignal, coverFilter []uint64, machineChecked func(features flatrpc.Feature)) {
ctx, done := context.WithCancel(context.Background())
cfg := &rpcserver.LocalConfig{
@@ -448,6 +447,7 @@ func startRpcserver(t *testing.T, target *prog.Target, executor string, source q
Features: flatrpc.AllFeatures,
Sandbox: flatrpc.ExecEnvSandboxNone,
},
+ VMArch: vmArch,
Procs: runtime.GOMAXPROCS(0),
Slowdown: 10, // to deflake slower tests
},
diff --git a/sys/targets/targets.go b/sys/targets/targets.go
index fd5d6f10c..a623dbfb6 100644
--- a/sys/targets/targets.go
+++ b/sys/targets/targets.go
@@ -42,6 +42,7 @@ type Target struct {
HostEndian binary.ByteOrder
SyscallTrampolines map[string]string
Addr2Line func() (string, error)
+ KernelAddresses KernelAddresses
init *sync.Once
initOther *sync.Once
@@ -50,6 +51,16 @@ type Target struct {
timeouts Timeouts
}
+// KernelAddresses contains approximate, rounded-up kernel text/data ranges
+// that are used to filter signal and comparisons for bogus/useless entries.
+// Zero values mean no filtering.
+type KernelAddresses struct {
+ TextStart uint64
+ TextEnd uint64
+ DataStart uint64
+ DataEnd uint64
+}
+
func (target *Target) HasCallNumber(callName string) bool {
return target.SyscallNumbers && !strings.HasPrefix(callName, "syz_")
}
@@ -259,6 +270,15 @@ var List = map[string]map[string]*Target{
// (added after commit 8a1ab3155c2ac on 2012-10-04).
return nr >= 313
},
+ KernelAddresses: KernelAddresses{
+ // Text/modules range for x86_64.
+ TextStart: 0xffffffff80000000,
+ TextEnd: 0xffffffffff000000,
+ // This range corresponds to the first 1TB of the physical memory mapping,
+ // see Documentation/arch/x86/x86_64/mm.rst.
+ DataStart: 0xffff880000000000,
+ DataEnd: 0xffff890000000000,
+ },
},
I386: {
VMArch: AMD64,
@@ -655,6 +675,15 @@ func init() {
if runtime.GOOS == OpenBSD {
target.BrokenCompiler = "can't build TestOS on OpenBSD due to missing syscall function."
}
+		// These are used only for pkg/runtest tests; the executor also knows about these values.
+ target.KernelAddresses.TextStart = 0xc0dec0dec0000000
+ target.KernelAddresses.TextEnd = 0xc0dec0dec1000000
+ if target.PtrSize == 4 {
+ target.KernelAddresses.TextStart = uint64(uint32(target.KernelAddresses.TextStart))
+ target.KernelAddresses.TextEnd = uint64(uint32(target.KernelAddresses.TextEnd))
+ }
+ target.KernelAddresses.DataStart = 0xda1a0000
+ target.KernelAddresses.DataEnd = 0xda1a1000
}
}
diff --git a/sys/test/exec.txt b/sys/test/exec.txt
index ffb56610b..fb895bd18 100644
--- a/sys/test/exec.txt
+++ b/sys/test/exec.txt
@@ -11,8 +11,8 @@ syz_compare_int$3(n const[3], v0 intptr, v1 intptr, v2 intptr)
syz_compare_int$4(n const[4], v0 intptr, v1 intptr, v2 intptr, v3 intptr)
syz_compare_zlib(data ptr[in, array[int8]], size bytesize[data], zdata ptr[in, compressed_image], zsize bytesize[zdata]) (timeout[4000], no_generate, no_minimize)
-# Copies the data into KCOV buffer verbatim and sets assumed kernel bitness.
-syz_inject_cover(is64 bool8, ptr ptr[in, array[int8]], size bytesize[ptr])
+# Copies the data into KCOV buffer verbatim.
+syz_inject_cover(ptr ptr[in, array[int8]], size bytesize[ptr])
compare_data [
align0 align0