author    Dmitry Vyukov <dvyukov@google.com>  2024-06-28 16:33:04 +0200
committer Dmitry Vyukov <dvyukov@google.com>  2024-07-01 13:48:43 +0000
commit    a6f99ace4014896f81a2f101416fd5413579f2bd
tree      c6ace6c5a8736261fd462e83e19bbb88bd1a2ee3 /pkg
parent    1f0ee43044bc8fc00bc1eccc85a93bf2b9972dd1
pkg/rpcserver: move kernel text/data range checks from executor
We see some errors of the form:

    SYZFAIL: coverage filter is full
    pc=0x80007000c0008 regions=[0xffffffffbfffffff 0x243fffffff 0x143fffffff 0xc3fffffff] alloc=156

The executor shouldn't send non-kernel addresses in signal, but somehow it does.
This can happen if the VM memory is corrupted, or if the test program does something
very nasty (e.g. discovers the output region and writes to it). It's not possible to
reliably filter signal in the tested VM, so move all of the filtering logic to the host.

Fixes #4942
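For orientation before the generated-code churn: the host-side filtering added in
pkg/rpcserver/runner.go below boils down to the following standalone sketch. The types
here are simplified stand-ins for flatrpc.CallInfo, flatrpc.Comparison and
targets.KernelAddresses, canonicalization is omitted, and the address ranges in main
are hypothetical, so treat this as an illustration rather than the exact implementation:

package main

import (
	"fmt"
	"slices"
)

// Simplified stand-ins for flatrpc.CallInfo, flatrpc.Comparison and
// targets.KernelAddresses; the real types live in pkg/flatrpc and sys/targets.
type comparison struct{ Op1, Op2 uint64 }

type callInfo struct {
	Signal, Cover []uint64
	Comps         []comparison
}

type kernelAddresses struct {
	TextStart, TextEnd, DataStart, DataEnd uint64
}

// signExtend32 models how a 32-bit kernel address shows up in a 64-bit
// comparison operand: 0xc0000000 becomes 0xffffffffc0000000.
func signExtend32(v uint64) uint64 { return uint64(int64(int32(v))) }

// convertCallInfo mirrors the logic added to pkg/rpcserver/runner.go.
// If any signal PC falls outside the kernel text range, the whole
// signal/cover is dropped so that programs able to inject bogus coverage
// never reach the corpus. Comparisons where both operands look like
// kernel text/data pointers are filtered out as uninteresting.
func convertCallInfo(call *callInfo, addrs kernelAddresses, ptrSize int) {
	textStart, textEnd := addrs.TextStart, addrs.TextEnd
	if textStart != 0 {
		for _, pc := range call.Signal {
			if pc < textStart || pc > textEnd {
				call.Signal, call.Cover = nil, nil
				break
			}
		}
	}
	dataStart, dataEnd := addrs.DataStart, addrs.DataEnd
	if len(call.Comps) != 0 && (textStart != 0 || dataStart != 0) {
		if ptrSize == 4 {
			textStart, textEnd = signExtend32(textStart), signExtend32(textEnd)
			dataStart, dataEnd = signExtend32(dataStart), signExtend32(dataEnd)
		}
		isKptr := func(v uint64) bool {
			return v >= textStart && v <= textEnd || v >= dataStart && v <= dataEnd || v == 0
		}
		call.Comps = slices.DeleteFunc(call.Comps, func(c comparison) bool {
			return isKptr(c.Op1) && isKptr(c.Op2)
		})
	}
}

func main() {
	// Hypothetical Linux-like text range, for illustration only.
	addrs := kernelAddresses{TextStart: 0xffffffff81000000, TextEnd: 0xffffffffc0000000}
	call := &callInfo{Signal: []uint64{0xffffffff81001234, 0x80007000c0008}}
	convertCallInfo(call, addrs, 8)
	fmt.Println(call.Signal) // []: one bogus PC poisons the whole call
}

Note that on 32-bit kernels (ptrSize == 4) the ranges are sign-extended before
matching, because that is how 32-bit kernel addresses appear in 64-bit comparison
operands.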
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/flatrpc/flatrpc.fbs      |  2
-rw-r--r--  pkg/flatrpc/flatrpc.go       | 92
-rw-r--r--  pkg/flatrpc/flatrpc.h        | 50
-rw-r--r--  pkg/rpcserver/local.go       |  5
-rw-r--r--  pkg/rpcserver/rpcserver.go   | 65
-rw-r--r--  pkg/rpcserver/runner.go      | 55
-rw-r--r--  pkg/runtest/executor_test.go |  4
-rw-r--r--  pkg/runtest/run_test.go      | 60
8 files changed, 235 insertions(+), 98 deletions(-)
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 58a6b3250..f0b03c4a9 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -37,6 +37,8 @@ table ConnectRequestRaw {
table ConnectReplyRaw {
debug :bool;
cover :bool;
+ cover_edges :bool;
+ kernel_64_bit :bool;
procs :int32;
slowdown :int32;
syscall_timeout_ms :int32;
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index 0e46dddbd..87f6ad19d 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -509,6 +509,8 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
type ConnectReplyRawT struct {
Debug bool `json:"debug"`
Cover bool `json:"cover"`
+ CoverEdges bool `json:"cover_edges"`
+ Kernel64Bit bool `json:"kernel_64_bit"`
Procs int32 `json:"procs"`
Slowdown int32 `json:"slowdown"`
SyscallTimeoutMs int32 `json:"syscall_timeout_ms"`
@@ -579,6 +581,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
ConnectReplyRawStart(builder)
ConnectReplyRawAddDebug(builder, t.Debug)
ConnectReplyRawAddCover(builder, t.Cover)
+ ConnectReplyRawAddCoverEdges(builder, t.CoverEdges)
+ ConnectReplyRawAddKernel64Bit(builder, t.Kernel64Bit)
ConnectReplyRawAddProcs(builder, t.Procs)
ConnectReplyRawAddSlowdown(builder, t.Slowdown)
ConnectReplyRawAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs)
@@ -594,6 +598,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) {
t.Debug = rcv.Debug()
t.Cover = rcv.Cover()
+ t.CoverEdges = rcv.CoverEdges()
+ t.Kernel64Bit = rcv.Kernel64Bit()
t.Procs = rcv.Procs()
t.Slowdown = rcv.Slowdown()
t.SyscallTimeoutMs = rcv.SyscallTimeoutMs()
@@ -681,20 +687,44 @@ func (rcv *ConnectReplyRaw) MutateCover(n bool) bool {
return rcv._tab.MutateBoolSlot(6, n)
}
-func (rcv *ConnectReplyRaw) Procs() int32 {
+func (rcv *ConnectReplyRaw) CoverEdges() bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateCoverEdges(n bool) bool {
+ return rcv._tab.MutateBoolSlot(8, n)
+}
+
+func (rcv *ConnectReplyRaw) Kernel64Bit() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateKernel64Bit(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func (rcv *ConnectReplyRaw) Procs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
return 0
}
func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool {
- return rcv._tab.MutateInt32Slot(8, n)
+ return rcv._tab.MutateInt32Slot(12, n)
}
func (rcv *ConnectReplyRaw) Slowdown() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -702,11 +732,11 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 {
}
func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool {
- return rcv._tab.MutateInt32Slot(10, n)
+ return rcv._tab.MutateInt32Slot(14, n)
}
func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -714,11 +744,11 @@ func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
}
func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool {
- return rcv._tab.MutateInt32Slot(12, n)
+ return rcv._tab.MutateInt32Slot(16, n)
}
func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -726,11 +756,11 @@ func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
}
func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool {
- return rcv._tab.MutateInt32Slot(14, n)
+ return rcv._tab.MutateInt32Slot(18, n)
}
func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -739,7 +769,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) LeakFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -747,7 +777,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int {
}
func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -756,7 +786,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) RaceFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -764,7 +794,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int {
}
func (rcv *ConnectReplyRaw) Features() Feature {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos))
}
@@ -772,11 +802,11 @@ func (rcv *ConnectReplyRaw) Features() Feature {
}
func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool {
- return rcv._tab.MutateUint64Slot(20, uint64(n))
+ return rcv._tab.MutateUint64Slot(24, uint64(n))
}
func (rcv *ConnectReplyRaw) Files(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -785,7 +815,7 @@ func (rcv *ConnectReplyRaw) Files(j int) []byte {
}
func (rcv *ConnectReplyRaw) FilesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -793,7 +823,7 @@ func (rcv *ConnectReplyRaw) FilesLength() int {
}
func (rcv *ConnectReplyRaw) Globs(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -802,7 +832,7 @@ func (rcv *ConnectReplyRaw) Globs(j int) []byte {
}
func (rcv *ConnectReplyRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -810,7 +840,7 @@ func (rcv *ConnectReplyRaw) GlobsLength() int {
}
func ConnectReplyRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(11)
+ builder.StartObject(13)
}
func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
builder.PrependBoolSlot(0, debug, false)
@@ -818,41 +848,47 @@ func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) {
builder.PrependBoolSlot(1, cover, false)
}
+func ConnectReplyRawAddCoverEdges(builder *flatbuffers.Builder, coverEdges bool) {
+ builder.PrependBoolSlot(2, coverEdges, false)
+}
+func ConnectReplyRawAddKernel64Bit(builder *flatbuffers.Builder, kernel64Bit bool) {
+ builder.PrependBoolSlot(3, kernel64Bit, false)
+}
func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) {
- builder.PrependInt32Slot(2, procs, 0)
+ builder.PrependInt32Slot(4, procs, 0)
}
func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) {
- builder.PrependInt32Slot(3, slowdown, 0)
+ builder.PrependInt32Slot(5, slowdown, 0)
}
func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) {
- builder.PrependInt32Slot(4, syscallTimeoutMs, 0)
+ builder.PrependInt32Slot(6, syscallTimeoutMs, 0)
}
func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) {
- builder.PrependInt32Slot(5, programTimeoutMs, 0)
+ builder.PrependInt32Slot(7, programTimeoutMs, 0)
}
func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(leakFrames), 0)
+ builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(leakFrames), 0)
}
func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(raceFrames), 0)
+ builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(raceFrames), 0)
}
func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) {
- builder.PrependUint64Slot(8, uint64(features), 0)
+ builder.PrependUint64Slot(10, uint64(features), 0)
}
func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(files), 0)
+ builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(files), 0)
}
func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(globs), 0)
+ builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(globs), 0)
}
func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index e3c2de7af..a2704f9e9 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -807,6 +807,8 @@ struct ConnectReplyRawT : public flatbuffers::NativeTable {
typedef ConnectReplyRaw TableType;
bool debug = false;
bool cover = false;
+ bool cover_edges = false;
+ bool kernel_64_bit = false;
int32_t procs = 0;
int32_t slowdown = 0;
int32_t syscall_timeout_ms = 0;
@@ -824,15 +826,17 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_DEBUG = 4,
VT_COVER = 6,
- VT_PROCS = 8,
- VT_SLOWDOWN = 10,
- VT_SYSCALL_TIMEOUT_MS = 12,
- VT_PROGRAM_TIMEOUT_MS = 14,
- VT_LEAK_FRAMES = 16,
- VT_RACE_FRAMES = 18,
- VT_FEATURES = 20,
- VT_FILES = 22,
- VT_GLOBS = 24
+ VT_COVER_EDGES = 8,
+ VT_KERNEL_64_BIT = 10,
+ VT_PROCS = 12,
+ VT_SLOWDOWN = 14,
+ VT_SYSCALL_TIMEOUT_MS = 16,
+ VT_PROGRAM_TIMEOUT_MS = 18,
+ VT_LEAK_FRAMES = 20,
+ VT_RACE_FRAMES = 22,
+ VT_FEATURES = 24,
+ VT_FILES = 26,
+ VT_GLOBS = 28
};
bool debug() const {
return GetField<uint8_t>(VT_DEBUG, 0) != 0;
@@ -840,6 +844,12 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool cover() const {
return GetField<uint8_t>(VT_COVER, 0) != 0;
}
+ bool cover_edges() const {
+ return GetField<uint8_t>(VT_COVER_EDGES, 0) != 0;
+ }
+ bool kernel_64_bit() const {
+ return GetField<uint8_t>(VT_KERNEL_64_BIT, 0) != 0;
+ }
int32_t procs() const {
return GetField<int32_t>(VT_PROCS, 0);
}
@@ -871,6 +881,8 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_DEBUG, 1) &&
VerifyField<uint8_t>(verifier, VT_COVER, 1) &&
+ VerifyField<uint8_t>(verifier, VT_COVER_EDGES, 1) &&
+ VerifyField<uint8_t>(verifier, VT_KERNEL_64_BIT, 1) &&
VerifyField<int32_t>(verifier, VT_PROCS, 4) &&
VerifyField<int32_t>(verifier, VT_SLOWDOWN, 4) &&
VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) &&
@@ -905,6 +917,12 @@ struct ConnectReplyRawBuilder {
void add_cover(bool cover) {
fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER, static_cast<uint8_t>(cover), 0);
}
+ void add_cover_edges(bool cover_edges) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER_EDGES, static_cast<uint8_t>(cover_edges), 0);
+ }
+ void add_kernel_64_bit(bool kernel_64_bit) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_KERNEL_64_BIT, static_cast<uint8_t>(kernel_64_bit), 0);
+ }
void add_procs(int32_t procs) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROCS, procs, 0);
}
@@ -947,6 +965,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
bool cover = false,
+ bool cover_edges = false,
+ bool kernel_64_bit = false,
int32_t procs = 0,
int32_t slowdown = 0,
int32_t syscall_timeout_ms = 0,
@@ -966,6 +986,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
builder_.add_syscall_timeout_ms(syscall_timeout_ms);
builder_.add_slowdown(slowdown);
builder_.add_procs(procs);
+ builder_.add_kernel_64_bit(kernel_64_bit);
+ builder_.add_cover_edges(cover_edges);
builder_.add_cover(cover);
builder_.add_debug(debug);
return builder_.Finish();
@@ -975,6 +997,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
bool cover = false,
+ bool cover_edges = false,
+ bool kernel_64_bit = false,
int32_t procs = 0,
int32_t slowdown = 0,
int32_t syscall_timeout_ms = 0,
@@ -992,6 +1016,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
_fbb,
debug,
cover,
+ cover_edges,
+ kernel_64_bit,
procs,
slowdown,
syscall_timeout_ms,
@@ -2446,6 +2472,8 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r
(void)_resolver;
{ auto _e = debug(); _o->debug = _e; }
{ auto _e = cover(); _o->cover = _e; }
+ { auto _e = cover_edges(); _o->cover_edges = _e; }
+ { auto _e = kernel_64_bit(); _o->kernel_64_bit = _e; }
{ auto _e = procs(); _o->procs = _e; }
{ auto _e = slowdown(); _o->slowdown = _e; }
{ auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; }
@@ -2467,6 +2495,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _debug = _o->debug;
auto _cover = _o->cover;
+ auto _cover_edges = _o->cover_edges;
+ auto _kernel_64_bit = _o->kernel_64_bit;
auto _procs = _o->procs;
auto _slowdown = _o->slowdown;
auto _syscall_timeout_ms = _o->syscall_timeout_ms;
@@ -2480,6 +2510,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
_fbb,
_debug,
_cover,
+ _cover_edges,
+ _kernel_64_bit,
_procs,
_slowdown,
_syscall_timeout_ms,
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
index bd58ca4ad..1831259f2 100644
--- a/pkg/rpcserver/local.go
+++ b/pkg/rpcserver/local.go
@@ -38,6 +38,11 @@ type LocalConfig struct {
}
func RunLocal(cfg *LocalConfig) error {
+ if cfg.VMArch == "" {
+ cfg.VMArch = cfg.Target.Arch
+ }
+ cfg.UseCoverEdges = true
+ cfg.FilterSignal = true
cfg.RPC = ":0"
cfg.VMLess = true
cfg.PrintMachineCheck = log.V(1)
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index acf31e868..cdeb6f40b 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -30,8 +30,14 @@ import (
type Config struct {
vminfo.Config
- RPC string
- VMLess bool
+ VMArch string
+ RPC string
+ VMLess bool
+ // Hash adjacent PCs to form fuzzing feedback signal (otherwise just use coverage PCs as signal).
+ UseCoverEdges bool
+ // Filter signal/comparisons against target kernel text/data ranges.
+ // Disabled for gVisor/Starnix which are not Linux.
+ FilterSignal bool
PrintMachineCheck bool
Procs int
Slowdown int
@@ -49,12 +55,13 @@ type Server struct {
StatExecs *stats.Val
StatNumFuzzing *stats.Val
- cfg *Config
- mgr Manager
- serv *flatrpc.Serv
- target *prog.Target
- timeouts targets.Timeouts
- checker *vminfo.Checker
+ cfg *Config
+ mgr Manager
+ serv *flatrpc.Serv
+ target *prog.Target
+ sysTarget *targets.Target
+ timeouts targets.Timeouts
+ checker *vminfo.Checker
infoOnce sync.Once
checkDone atomic.Bool
@@ -88,8 +95,13 @@ func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) {
Sandbox: sandbox,
SandboxArg: cfg.SandboxArg,
},
- RPC: cfg.RPC,
- VMLess: cfg.VMLess,
+ VMArch: cfg.TargetVMArch,
+ RPC: cfg.RPC,
+ VMLess: cfg.VMLess,
+ // gVisor coverage is not a trace, so producing edges won't work.
+ UseCoverEdges: cfg.Type != targets.GVisor,
+ // gVisor/Starnix are not Linux, so filtering against Linux ranges won't work.
+ FilterSignal: cfg.Type != targets.GVisor && cfg.Type != targets.Starnix,
PrintMachineCheck: true,
Procs: cfg.Procs,
Slowdown: cfg.Timeouts.Slowdown,
@@ -100,11 +112,14 @@ func newImpl(cfg *Config, mgr Manager) (*Server, error) {
cfg.Procs = min(cfg.Procs, prog.MaxPids)
checker := vminfo.New(&cfg.Config)
baseSource := queue.DynamicSource(checker)
+ // Note that we use VMArch, rather than Arch. We need the kernel address ranges and bitness.
+ sysTarget := targets.Get(cfg.Target.OS, cfg.VMArch)
serv := &Server{
cfg: cfg,
mgr: mgr,
target: cfg.Target,
- timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown),
+ sysTarget: sysTarget,
+ timeouts: sysTarget.Timeouts(cfg.Slowdown),
runners: make(map[string]*Runner),
info: make(map[string]VMState),
checker: checker,
@@ -245,6 +260,8 @@ func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Canoni
connectReply := &flatrpc.ConnectReply{
Debug: serv.cfg.Debug,
Cover: serv.cfg.Cover,
+ CoverEdges: serv.cfg.UseCoverEdges,
+ Kernel64Bit: serv.sysTarget.PtrSize == 8,
Procs: int32(serv.cfg.Procs),
Slowdown: int32(serv.timeouts.Slowdown),
SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond),
@@ -421,18 +438,20 @@ func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enable
func (serv *Server) CreateInstance(name string, injectExec chan<- bool) {
runner := &Runner{
- source: serv.execSource,
- cover: serv.cfg.Cover,
- debug: serv.cfg.Debug,
- injectExec: injectExec,
- infoc: make(chan chan []byte),
- finished: make(chan bool),
- requests: make(map[int64]*queue.Request),
- executing: make(map[int64]bool),
- lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
- rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
- stats: serv.runnerStats,
- procs: serv.cfg.Procs,
+ source: serv.execSource,
+ cover: serv.cfg.Cover,
+ filterSignal: serv.cfg.FilterSignal,
+ debug: serv.cfg.Debug,
+ sysTarget: serv.sysTarget,
+ injectExec: injectExec,
+ infoc: make(chan chan []byte),
+ finished: make(chan bool),
+ requests: make(map[int64]*queue.Request),
+ executing: make(map[int64]bool),
+ lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
+ rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
+ stats: serv.runnerStats,
+ procs: serv.cfg.Procs,
}
serv.mu.Lock()
if serv.runners[name] != nil {
diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go
index 0c41346ee..b5903848f 100644
--- a/pkg/rpcserver/runner.go
+++ b/pkg/rpcserver/runner.go
@@ -18,13 +18,16 @@ import (
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/stats"
"github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
)
type Runner struct {
source queue.Source
procs int
cover bool
+ filterSignal bool
debug bool
+ sysTarget *targets.Target
stats *runnerStats
stopped bool
finished chan bool
@@ -235,10 +238,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
// Coverage collection is disabled, but signal was requested => use a substitute signal.
addFallbackSignal(req.Prog, msg.Info)
}
- for i := 0; i < len(msg.Info.Calls); i++ {
- call := msg.Info.Calls[i]
- call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
- call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+ for _, call := range msg.Info.Calls {
+ runner.convertCallInfo(call)
}
if len(msg.Info.ExtraRaw) != 0 {
msg.Info.Extra = msg.Info.ExtraRaw[0]
@@ -248,9 +249,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...)
msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...)
}
- msg.Info.Extra.Cover = runner.canonicalizer.Canonicalize(msg.Info.Extra.Cover)
- msg.Info.Extra.Signal = runner.canonicalizer.Canonicalize(msg.Info.Extra.Signal)
msg.Info.ExtraRaw = nil
+ runner.convertCallInfo(msg.Info.Extra)
}
}
status := queue.Success
@@ -268,6 +268,49 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
return nil
}
+func (runner *Runner) convertCallInfo(call *flatrpc.CallInfo) {
+ call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
+ call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+
+ // Check signal belongs to kernel addresses.
+ // Mismatching addresses can mean either corrupted VM memory, or that the fuzzer somehow
+ // managed to inject output signal. If we see any bogus signal, drop whole signal
+ // (we don't want programs that can inject bogus coverage to end up in the corpus).
+ var kernelAddresses targets.KernelAddresses
+ if runner.filterSignal {
+ kernelAddresses = runner.sysTarget.KernelAddresses
+ }
+ textStart, textEnd := kernelAddresses.TextStart, kernelAddresses.TextEnd
+ if textStart != 0 {
+ for _, sig := range call.Signal {
+ if sig < textStart || sig > textEnd {
+ call.Signal = []uint64{}
+ call.Cover = []uint64{}
+ break
+ }
+ }
+ }
+
+ // Filter out kernel physical memory addresses.
+ // These are internal kernel comparisons and should not be interesting.
+ dataStart, dataEnd := kernelAddresses.DataStart, kernelAddresses.DataEnd
+ if len(call.Comps) != 0 && (textStart != 0 || dataStart != 0) {
+ if runner.sysTarget.PtrSize == 4 {
+ // These will appear sign-extended in comparison operands.
+ textStart = uint64(int64(int32(textStart)))
+ textEnd = uint64(int64(int32(textEnd)))
+ dataStart = uint64(int64(int32(dataStart)))
+ dataEnd = uint64(int64(int32(dataEnd)))
+ }
+ isKptr := func(val uint64) bool {
+ return val >= textStart && val <= textEnd || val >= dataStart && val <= dataEnd || val == 0
+ }
+ call.Comps = slices.DeleteFunc(call.Comps, func(cmp *flatrpc.Comparison) bool {
+ return isKptr(cmp.Op1) && isKptr(cmp.Op2)
+ })
+ }
+}
+
func (runner *Runner) sendSignalUpdate(plus, minus []uint64) error {
msg := &flatrpc.HostMessage{
Msg: &flatrpc.HostMessages{
diff --git a/pkg/runtest/executor_test.go b/pkg/runtest/executor_test.go
index 4bdadfd52..72889669d 100644
--- a/pkg/runtest/executor_test.go
+++ b/pkg/runtest/executor_test.go
@@ -61,7 +61,7 @@ func TestZlib(t *testing.T) {
}
executor := csource.BuildExecutor(t, target, "../..")
source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
+ startRPCServer(t, target, executor, "", source, nil, nil, nil)
r := rand.New(testutil.RandSource(t))
for i := 0; i < 10; i++ {
data := testutil.RandMountImage(r)
@@ -111,7 +111,7 @@ func TestExecutorCommonExt(t *testing.T) {
t.Fatal(err)
}
source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
+ startRPCServer(t, target, executor, "", source, nil, nil, nil)
req := &queue.Request{
Prog: p,
ReturnError: true,
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index 4c5cde642..f69173ef8 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -83,7 +83,7 @@ func test(t *testing.T, sysTarget *targets.Target) {
Verbose: true,
Debug: *flagDebug,
}
- startRpcserver(t, target, executor, ctx, nil, nil, func(features flatrpc.Feature) {
+ startRPCServer(t, target, executor, "", ctx, nil, nil, func(features flatrpc.Feature) {
// Features we expect to be enabled on the test OS.
// All sandboxes except for none are not implemented, coverage is not returned,
// and setup for few features is failing specifically to test feature detection.
@@ -138,7 +138,7 @@ func TestCover(t *testing.T) {
}
type CoverTest struct {
- Is64Bit int
+ Is64Bit bool
Input []byte
MaxSignal []uint64
CoverFilter []uint64
@@ -168,18 +168,18 @@ func testCover(t *testing.T, target *prog.Target) {
tests := []CoverTest{
// Empty coverage.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
// Single 64-bit PC.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0dec0dec0112233},
@@ -187,7 +187,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Single 32-bit PC.
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0112233),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0112233},
@@ -195,20 +195,20 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Ensure we don't sent cover/signal when not requested.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectCover,
Cover: []uint64{0xc0dec0dec0112233},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0112233),
Flags: flatrpc.ExecFlagCollectSignal,
Signal: []uint64{0xc0dec0dec0112233},
},
// Coverage deduplication.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover,
@@ -216,7 +216,7 @@ func testCover(t *testing.T, target *prog.Target) {
0xc0dec0dec0000011, 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
@@ -224,7 +224,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Signal hashing.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
Flags: flatrpc.ExecFlagCollectSignal,
@@ -233,18 +233,18 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Invalid non-kernel PCs must fail test execution.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000022, 0xc000000000000033),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0x33),
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
},
// 64-bit comparisons.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeComps(
// A normal 8-byte comparison must be returned in the output as is.
Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
@@ -287,14 +287,14 @@ func testCover(t *testing.T, target *prog.Target) {
},
// 32-bit comparisons must be the same, so test only a subset.
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeComps(
Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
Comparison{CmpSize4 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
Comparison{CmpSize4 | CmpConst, 0xc0de0000, 0xc0de1000, 0},
- Comparison{CmpSize8 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
+ Comparison{CmpSize4 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
),
Flags: flatrpc.ExecFlagCollectComps,
Comps: [][2]uint64{
@@ -305,7 +305,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Test max signal.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000002,
0xc0dec0dec0000100, 0xc0dec0dec0001000),
MaxSignal: []uint64{0xc0dec0dec0000001, 0xc0dec0dec0000013, 0xc0dec0dec0000abc},
@@ -315,7 +315,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0dec0dec0001100, 0xc0dec0dec0000102},
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0000001, 0xc0000010, 0xc0000002, 0xc0000100, 0xc0001000),
MaxSignal: []uint64{0xc0000001, 0xc0000013, 0xc0000abc},
Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
@@ -323,7 +323,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0001100, 0xc0000102},
},
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000002,
0xc0dec0dec0000100, 0xc0dec0dec0001000),
MaxSignal: []uint64{0xc0dec0dec0000001, 0xc0dec0dec0000013, 0xc0dec0dec0000abc},
@@ -334,7 +334,7 @@ func testCover(t *testing.T, target *prog.Target) {
},
// Test cover filter.
{
- Is64Bit: 1,
+ Is64Bit: true,
Input: makeCover64(0xc0dec0dec0000001, 0xc0dec0dec0000010, 0xc0dec0dec0000020,
0xc0dec0dec0000040, 0xc0dec0dec0000100, 0xc0dec0dec0001000, 0xc0dec0dec0002000),
CoverFilter: []uint64{0xc0dec0dec0000002, 0xc0dec0dec0000100},
@@ -344,7 +344,7 @@ func testCover(t *testing.T, target *prog.Target) {
Signal: []uint64{0xc0dec0dec0001100, 0xc0dec0dec0000140, 0xc0dec0dec0000011, 0xc0dec0dec0000001},
},
{
- Is64Bit: 0,
+ Is64Bit: false,
Input: makeCover32(0xc0000001, 0xc0000010, 0xc0000020, 0xc0000040,
0xc0000100, 0xc0001000, 0xc0002000),
CoverFilter: []uint64{0xc0000002, 0xc0000100},
@@ -355,24 +355,23 @@ func testCover(t *testing.T, target *prog.Target) {
},
}
executor := csource.BuildExecutor(t, target, "../../")
- source := queue.Plain()
- startRpcserver(t, target, executor, source, nil, nil, nil)
for i, test := range tests {
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Parallel()
- mysource := source
- if len(test.MaxSignal)+len(test.CoverFilter) != 0 {
- mysource = queue.Plain()
- startRpcserver(t, target, executor, mysource, test.MaxSignal, test.CoverFilter, nil)
+ source := queue.Plain()
+ vmArch := targets.TestArch32
+ if test.Is64Bit {
+ vmArch = targets.TestArch64
}
- testCover1(t, target, test, mysource)
+ startRPCServer(t, target, executor, vmArch, source, test.MaxSignal, test.CoverFilter, nil)
+ testCover1(t, target, test, source)
})
}
}
func testCover1(t *testing.T, target *prog.Target, test CoverTest, source *queue.PlainQueue) {
- text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
+ text := fmt.Sprintf(`syz_inject_cover(&AUTO="%s", AUTO)`, hex.EncodeToString(test.Input))
p, err := target.Deserialize([]byte(text), prog.Strict)
if err != nil {
t.Fatal(err)
@@ -436,7 +435,7 @@ func makeComps(comps ...Comparison) []byte {
return w.Bytes()
}
-func startRpcserver(t *testing.T, target *prog.Target, executor string, source queue.Source,
+func startRPCServer(t *testing.T, target *prog.Target, executor, vmArch string, source queue.Source,
maxSignal, coverFilter []uint64, machineChecked func(features flatrpc.Feature)) {
ctx, done := context.WithCancel(context.Background())
cfg := &rpcserver.LocalConfig{
@@ -448,6 +447,7 @@ func startRpcserver(t *testing.T, target *prog.Target, executor string, source q
Features: flatrpc.AllFeatures,
Sandbox: flatrpc.ExecEnvSandboxNone,
},
+ VMArch: vmArch,
Procs: runtime.GOMAXPROCS(0),
Slowdown: 10, // to deflake slower tests
},