diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2024-06-28 16:33:04 +0200 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2024-07-01 13:48:43 +0000 |
| commit | a6f99ace4014896f81a2f101416fd5413579f2bd (patch) | |
| tree | c6ace6c5a8736261fd462e83e19bbb88bd1a2ee3 /pkg/rpcserver | |
| parent | 1f0ee43044bc8fc00bc1eccc85a93bf2b9972dd1 (diff) | |
pkg/rpcserver: move kernel test/data range checks from executor
We see some errors of the form:
SYZFAIL: coverage filter is full
pc=0x80007000c0008 regions=[0xffffffffbfffffff 0x243fffffff 0x143fffffff 0xc3fffffff] alloc=156
Executor shouldn't send non-kernel addresses in signal,
but somehow it does. It can happen if the VM memory is corrupted,
or if the test program does something very nasty (e.g. discovers
the output region and writes to it).
It's not possible to reliably filter signal in the tested VM.
Move all of the filtering logic to the host.
Fixes #4942
Diffstat (limited to 'pkg/rpcserver')
| -rw-r--r-- | pkg/rpcserver/local.go | 5 | ||||
| -rw-r--r-- | pkg/rpcserver/rpcserver.go | 65 | ||||
| -rw-r--r-- | pkg/rpcserver/runner.go | 55 |
3 files changed, 96 insertions, 29 deletions
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go index bd58ca4ad..1831259f2 100644 --- a/pkg/rpcserver/local.go +++ b/pkg/rpcserver/local.go @@ -38,6 +38,11 @@ type LocalConfig struct { } func RunLocal(cfg *LocalConfig) error { + if cfg.VMArch == "" { + cfg.VMArch = cfg.Target.Arch + } + cfg.UseCoverEdges = true + cfg.FilterSignal = true cfg.RPC = ":0" cfg.VMLess = true cfg.PrintMachineCheck = log.V(1) diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go index acf31e868..cdeb6f40b 100644 --- a/pkg/rpcserver/rpcserver.go +++ b/pkg/rpcserver/rpcserver.go @@ -30,8 +30,14 @@ import ( type Config struct { vminfo.Config - RPC string - VMLess bool + VMArch string + RPC string + VMLess bool + // Hash adjacent PCs to form fuzzing feedback signal (otherwise just use coverage PCs as signal). + UseCoverEdges bool + // Filter signal/comparisons against target kernel text/data ranges. + // Disabled for gVisor/Starnix which are not Linux. + FilterSignal bool PrintMachineCheck bool Procs int Slowdown int @@ -49,12 +55,13 @@ type Server struct { StatExecs *stats.Val StatNumFuzzing *stats.Val - cfg *Config - mgr Manager - serv *flatrpc.Serv - target *prog.Target - timeouts targets.Timeouts - checker *vminfo.Checker + cfg *Config + mgr Manager + serv *flatrpc.Serv + target *prog.Target + sysTarget *targets.Target + timeouts targets.Timeouts + checker *vminfo.Checker infoOnce sync.Once checkDone atomic.Bool @@ -88,8 +95,13 @@ func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) { Sandbox: sandbox, SandboxArg: cfg.SandboxArg, }, - RPC: cfg.RPC, - VMLess: cfg.VMLess, + VMArch: cfg.TargetVMArch, + RPC: cfg.RPC, + VMLess: cfg.VMLess, + // gVisor coverage is not a trace, so producing edges won't work. + UseCoverEdges: cfg.Type != targets.GVisor, + // gVisor/Starnix are not Linux, so filtering against Linux ranges won't work. 
+ FilterSignal: cfg.Type != targets.GVisor && cfg.Type != targets.Starnix, PrintMachineCheck: true, Procs: cfg.Procs, Slowdown: cfg.Timeouts.Slowdown, @@ -100,11 +112,14 @@ func newImpl(cfg *Config, mgr Manager) (*Server, error) { cfg.Procs = min(cfg.Procs, prog.MaxPids) checker := vminfo.New(&cfg.Config) baseSource := queue.DynamicSource(checker) + // Note that we use VMArch, rather than Arch. We need the kernel address ranges and bitness. + sysTarget := targets.Get(cfg.Target.OS, cfg.VMArch) serv := &Server{ cfg: cfg, mgr: mgr, target: cfg.Target, - timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown), + sysTarget: sysTarget, + timeouts: sysTarget.Timeouts(cfg.Slowdown), runners: make(map[string]*Runner), info: make(map[string]VMState), checker: checker, @@ -245,6 +260,8 @@ func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.Canoni connectReply := &flatrpc.ConnectReply{ Debug: serv.cfg.Debug, Cover: serv.cfg.Cover, + CoverEdges: serv.cfg.UseCoverEdges, + Kernel64Bit: serv.sysTarget.PtrSize == 8, Procs: int32(serv.cfg.Procs), Slowdown: int32(serv.timeouts.Slowdown), SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond), @@ -421,18 +438,20 @@ func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enable func (serv *Server) CreateInstance(name string, injectExec chan<- bool) { runner := &Runner{ - source: serv.execSource, - cover: serv.cfg.Cover, - debug: serv.cfg.Debug, - injectExec: injectExec, - infoc: make(chan chan []byte), - finished: make(chan bool), - requests: make(map[int64]*queue.Request), - executing: make(map[int64]bool), - lastExec: MakeLastExecuting(serv.cfg.Procs, 6), - rnd: rand.New(rand.NewSource(time.Now().UnixNano())), - stats: serv.runnerStats, - procs: serv.cfg.Procs, + source: serv.execSource, + cover: serv.cfg.Cover, + filterSignal: serv.cfg.FilterSignal, + debug: serv.cfg.Debug, + sysTarget: serv.sysTarget, + injectExec: injectExec, + infoc: make(chan chan []byte), 
+ finished: make(chan bool), + requests: make(map[int64]*queue.Request), + executing: make(map[int64]bool), + lastExec: MakeLastExecuting(serv.cfg.Procs, 6), + rnd: rand.New(rand.NewSource(time.Now().UnixNano())), + stats: serv.runnerStats, + procs: serv.cfg.Procs, } serv.mu.Lock() if serv.runners[name] != nil { diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go index 0c41346ee..b5903848f 100644 --- a/pkg/rpcserver/runner.go +++ b/pkg/rpcserver/runner.go @@ -18,13 +18,16 @@ import ( "github.com/google/syzkaller/pkg/osutil" "github.com/google/syzkaller/pkg/stats" "github.com/google/syzkaller/prog" + "github.com/google/syzkaller/sys/targets" ) type Runner struct { source queue.Source procs int cover bool + filterSignal bool debug bool + sysTarget *targets.Target stats *runnerStats stopped bool finished chan bool @@ -235,10 +238,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error { // Coverage collection is disabled, but signal was requested => use a substitute signal. addFallbackSignal(req.Prog, msg.Info) } - for i := 0; i < len(msg.Info.Calls); i++ { - call := msg.Info.Calls[i] - call.Cover = runner.canonicalizer.Canonicalize(call.Cover) - call.Signal = runner.canonicalizer.Canonicalize(call.Signal) + for _, call := range msg.Info.Calls { + runner.convertCallInfo(call) } if len(msg.Info.ExtraRaw) != 0 { msg.Info.Extra = msg.Info.ExtraRaw[0] @@ -248,9 +249,8 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error { msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...) msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...) 
} - msg.Info.Extra.Cover = runner.canonicalizer.Canonicalize(msg.Info.Extra.Cover) - msg.Info.Extra.Signal = runner.canonicalizer.Canonicalize(msg.Info.Extra.Signal) msg.Info.ExtraRaw = nil + runner.convertCallInfo(msg.Info.Extra) } } status := queue.Success @@ -268,6 +268,49 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error { return nil } +func (runner *Runner) convertCallInfo(call *flatrpc.CallInfo) { + call.Cover = runner.canonicalizer.Canonicalize(call.Cover) + call.Signal = runner.canonicalizer.Canonicalize(call.Signal) + + // Check signal belongs to kernel addresses. + // Mismatching addresses can mean either corrupted VM memory, or that the fuzzer somehow + // managed to inject output signal. If we see any bogus signal, drop whole signal + // (we don't want programs that can inject bogus coverage to end up in the corpus). + var kernelAddresses targets.KernelAddresses + if runner.filterSignal { + kernelAddresses = runner.sysTarget.KernelAddresses + } + textStart, textEnd := kernelAddresses.TextStart, kernelAddresses.TextEnd + if textStart != 0 { + for _, sig := range call.Signal { + if sig < textStart || sig > textEnd { + call.Signal = []uint64{} + call.Cover = []uint64{} + break + } + } + } + + // Filter out kernel physical memory addresses. + // These are internal kernel comparisons and should not be interesting. + dataStart, dataEnd := kernelAddresses.DataStart, kernelAddresses.DataEnd + if len(call.Comps) != 0 && (textStart != 0 || dataStart != 0) { + if runner.sysTarget.PtrSize == 4 { + // These will appear sign-extended in comparison operands. 
+ textStart = uint64(int64(int32(textStart))) + textEnd = uint64(int64(int32(textEnd))) + dataStart = uint64(int64(int32(dataStart))) + dataEnd = uint64(int64(int32(dataEnd))) + } + isKptr := func(val uint64) bool { + return val >= textStart && val <= textEnd || val >= dataStart && val <= dataEnd || val == 0 + } + call.Comps = slices.DeleteFunc(call.Comps, func(cmp *flatrpc.Comparison) bool { + return isKptr(cmp.Op1) && isKptr(cmp.Op2) + }) + } +} + func (runner *Runner) sendSignalUpdate(plus, minus []uint64) error { msg := &flatrpc.HostMessage{ Msg: &flatrpc.HostMessages{ |
