aboutsummaryrefslogtreecommitdiffstats
path: root/pkg/rpcserver/rpcserver.go
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-06-28 16:33:04 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-07-01 13:48:43 +0000
commita6f99ace4014896f81a2f101416fd5413579f2bd (patch)
treec6ace6c5a8736261fd462e83e19bbb88bd1a2ee3 /pkg/rpcserver/rpcserver.go
parent1f0ee43044bc8fc00bc1eccc85a93bf2b9972dd1 (diff)
pkg/rpcserver: move kernel text/data range checks from executor
We see some errors of the form:

SYZFAIL: coverage filter is full pc=0x80007000c0008 regions=[0xffffffffbfffffff 0x243fffffff 0x143fffffff 0xc3fffffff] alloc=156

Executor shouldn't send non-kernel addresses in signal, but somehow it does. It can happen if the VM memory is corrupted, or if the test program does something very nasty (e.g. discovers the output region and writes to it). It's not possible to reliably filter signal in the tested VM. Move all of the filtering logic to the host.

Fixes #4942
Diffstat (limited to 'pkg/rpcserver/rpcserver.go')
-rw-r--r--pkg/rpcserver/rpcserver.go65
1 files changed, 42 insertions, 23 deletions
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index acf31e868..cdeb6f40b 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -30,8 +30,14 @@ import (
type Config struct {
vminfo.Config
- RPC string
- VMLess bool
+ VMArch string
+ RPC string
+ VMLess bool
+ // Hash adjacent PCs to form fuzzing feedback signal (otherwise just use coverage PCs as signal).
+ UseCoverEdges bool
+ // Filter signal/comparisons against target kernel text/data ranges.
+ // Disabled for gVisor/Starnix which are not Linux.
+ FilterSignal bool
PrintMachineCheck bool
Procs int
Slowdown int
@@ -49,12 +55,13 @@ type Server struct {
StatExecs *stats.Val
StatNumFuzzing *stats.Val
- cfg *Config
- mgr Manager
- serv *flatrpc.Serv
- target *prog.Target
- timeouts targets.Timeouts
- checker *vminfo.Checker
+ cfg *Config
+ mgr Manager
+ serv *flatrpc.Serv
+ target *prog.Target
+ sysTarget *targets.Target
+ timeouts targets.Timeouts
+ checker *vminfo.Checker
infoOnce sync.Once
checkDone atomic.Bool
@@ -88,8 +95,13 @@ func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) {
Sandbox: sandbox,
SandboxArg: cfg.SandboxArg,
},
- RPC: cfg.RPC,
- VMLess: cfg.VMLess,
+ VMArch: cfg.TargetVMArch,
+ RPC: cfg.RPC,
+ VMLess: cfg.VMLess,
+ // gVisor coverage is not a trace, so producing edges won't work.
+ UseCoverEdges: cfg.Type != targets.GVisor,
+ // gVisor/Starnix are not Linux, so filtering against Linux ranges won't work.
+ FilterSignal: cfg.Type != targets.GVisor && cfg.Type != targets.Starnix,
PrintMachineCheck: true,
Procs: cfg.Procs,
Slowdown: cfg.Timeouts.Slowdown,
@@ -100,11 +112,14 @@ func newImpl(cfg *Config, mgr Manager) (*Server, error) {
cfg.Procs = min(cfg.Procs, prog.MaxPids)
checker := vminfo.New(&cfg.Config)
baseSource := queue.DynamicSource(checker)
+ // Note that we use VMArch, rather than Arch. We need the kernel address ranges and bitness.
+ sysTarget := targets.Get(cfg.Target.OS, cfg.VMArch)
serv := &Server{
cfg: cfg,
mgr: mgr,
target: cfg.Target,
- timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown),
+ sysTarget: sysTarget,
+ timeouts: sysTarget.Timeouts(cfg.Slowdown),
runners: make(map[string]*Runner),
info: make(map[string]VMState),
checker: checker,
@@ -245,6 +260,8 @@ func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Canoni
connectReply := &flatrpc.ConnectReply{
Debug: serv.cfg.Debug,
Cover: serv.cfg.Cover,
+ CoverEdges: serv.cfg.UseCoverEdges,
+ Kernel64Bit: serv.sysTarget.PtrSize == 8,
Procs: int32(serv.cfg.Procs),
Slowdown: int32(serv.timeouts.Slowdown),
SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond),
@@ -421,18 +438,20 @@ func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enable
func (serv *Server) CreateInstance(name string, injectExec chan<- bool) {
runner := &Runner{
- source: serv.execSource,
- cover: serv.cfg.Cover,
- debug: serv.cfg.Debug,
- injectExec: injectExec,
- infoc: make(chan chan []byte),
- finished: make(chan bool),
- requests: make(map[int64]*queue.Request),
- executing: make(map[int64]bool),
- lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
- rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
- stats: serv.runnerStats,
- procs: serv.cfg.Procs,
+ source: serv.execSource,
+ cover: serv.cfg.Cover,
+ filterSignal: serv.cfg.FilterSignal,
+ debug: serv.cfg.Debug,
+ sysTarget: serv.sysTarget,
+ injectExec: injectExec,
+ infoc: make(chan chan []byte),
+ finished: make(chan bool),
+ requests: make(map[int64]*queue.Request),
+ executing: make(map[int64]bool),
+ lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
+ rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
+ stats: serv.runnerStats,
+ procs: serv.cfg.Procs,
}
serv.mu.Lock()
if serv.runners[name] != nil {