-rw-r--r--  executor/executor.cc           |  10
-rw-r--r--  pkg/csource/csource.go         |   8
-rw-r--r--  pkg/csource/csource_test.go    |   1
-rw-r--r--  pkg/csource/options.go         |  10
-rw-r--r--  pkg/csource/options_test.go    |  11
-rw-r--r--  pkg/instance/instance.go       |  97
-rw-r--r--  pkg/instance/instance_test.go  |  11
-rw-r--r--  pkg/ipc/ipc.go                 |  12
-rw-r--r--  pkg/ipc/ipc_test.go            |  10
-rw-r--r--  pkg/ipc/ipcconfig/ipcconfig.go |   4
-rw-r--r--  pkg/mgrconfig/load.go          |  20
-rw-r--r--  pkg/repro/repro.go             |  33
-rw-r--r--  pkg/rpctype/rpc.go             |  31
-rw-r--r--  pkg/runtest/run.go             |   2
-rw-r--r--  sys/linux/init.go              |   1
-rw-r--r--  sys/targets/targets.go         |  76
-rw-r--r--  syz-fuzzer/fuzzer.go           |  18
-rw-r--r--  syz-fuzzer/testing.go          |   2
-rw-r--r--  syz-manager/hub.go             |   4
-rw-r--r--  syz-manager/manager.go         |   6
-rw-r--r--  tools/syz-crush/crush.go       |  13
-rw-r--r--  tools/syz-execprog/execprog.go |   3
-rw-r--r--  tools/syz-hubtool/hubtool.go   |   2
-rw-r--r--  tools/syz-runtest/runtest.go   |   2
-rw-r--r--  vm/vm.go                       |  31
-rw-r--r--  vm/vm_test.go                  |   6
26 files changed, 299 insertions(+), 125 deletions(-)
diff --git a/executor/executor.cc b/executor/executor.cc
index 76d172cc3..31969ff40 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -567,10 +567,14 @@ void receive_execute()
flag_fault_nth = req.fault_nth;
if (!flag_threaded)
flag_collide = false;
- debug("[%llums] exec opts: procid=%llu threaded=%d collide=%d cover=%d comps=%d dedup=%d fault=%d/%d/%d prog=%llu filter=%d\n",
+ debug("[%llums] exec opts: procid=%llu threaded=%d collide=%d cover=%d comps=%d dedup=%d fault=%d/%d/%d"
+ " timeouts=%llu/%llu/%llu prog=%llu filter=%d\n",
current_time_ms() - start_time_ms, procid, flag_threaded, flag_collide,
flag_collect_cover, flag_comparisons, flag_dedup_cover, flag_fault,
- flag_fault_call, flag_fault_nth, req.prog_size, flag_coverage_filter);
+ flag_fault_call, flag_fault_nth, syscall_timeout_ms, program_timeout_ms, slowdown_scale,
+ req.prog_size, flag_coverage_filter);
+ if (syscall_timeout_ms == 0 || program_timeout_ms <= syscall_timeout_ms || slowdown_scale == 0)
+ fail("bad timeouts: %llu/%llu/%llu", syscall_timeout_ms, program_timeout_ms, slowdown_scale);
if (SYZ_EXECUTOR_USES_SHMEM) {
if (req.prog_size)
fail("need_prog: no program");
@@ -758,8 +762,6 @@ retry:
// We already have results from the previous execution.
} else if (flag_threaded) {
// Wait for call completion.
- // Note: sys/linux knows about this 45 ms timeout when it generates timespec/timeval values.
- // Note: pkg/csource also knows about this 45 ms per-call timeout.
uint64 timeout_ms = syscall_timeout_ms + call->attrs.timeout * slowdown_scale;
// This is because of printing pre/post call. Ideally we print everything in the main thread
// and then remove this (would also avoid intermixed output).
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go
index b53b3b97f..a01141567 100644
--- a/pkg/csource/csource.go
+++ b/pkg/csource/csource.go
@@ -29,6 +29,7 @@ import (
"regexp"
"sort"
"strings"
+ "time"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
@@ -93,11 +94,12 @@ func Write(p *prog.Prog, opts Options) ([]byte, error) {
replacements["SANDBOX_FUNC"] = replacements["SYSCALLS"]
replacements["SYSCALLS"] = "unused"
}
- replacements["PROGRAM_TIMEOUT_MS"] = "5000"
- timeoutExpr := "45"
+ timeouts := ctx.sysTarget.Timeouts(opts.Slowdown)
+ replacements["PROGRAM_TIMEOUT_MS"] = fmt.Sprint(int(timeouts.Program / time.Millisecond))
+ timeoutExpr := fmt.Sprint(int(timeouts.Syscall / time.Millisecond))
for i, call := range p.Calls {
if timeout := call.Meta.Attrs.Timeout; timeout != 0 {
- timeoutExpr += fmt.Sprintf(" + (call == %d ? %d : 0)", i, timeout)
+ timeoutExpr += fmt.Sprintf(" + (call == %v ? %v : 0)", i, timeout*uint64(timeouts.Scale))
}
}
replacements["CALL_TIMEOUT_MS"] = timeoutExpr
diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go
index f6d04b35c..951f9f09a 100644
--- a/pkg/csource/csource_test.go
+++ b/pkg/csource/csource_test.go
@@ -53,6 +53,7 @@ var executorOpts = Options{
Collide: true,
Repeat: true,
Procs: 2,
+ Slowdown: 1,
Sandbox: "none",
Repro: true,
UseTmpDir: true,
diff --git a/pkg/csource/options.go b/pkg/csource/options.go
index ebea29fc2..3c1790483 100644
--- a/pkg/csource/options.go
+++ b/pkg/csource/options.go
@@ -23,6 +23,7 @@ type Options struct {
Repeat bool `json:"repeat,omitempty"`
RepeatTimes int `json:"repeat_times,omitempty"` // if non-0, repeat that many times
Procs int `json:"procs"`
+ Slowdown int `json:"slowdown"`
Sandbox string `json:"sandbox"`
Fault bool `json:"fault,omitempty"` // inject fault into FaultCall/FaultNth
@@ -154,6 +155,7 @@ func DefaultOpts(cfg *mgrconfig.Config) Options {
Collide: true,
Repeat: true,
Procs: cfg.Procs,
+ Slowdown: cfg.Timeouts.Slowdown,
Sandbox: cfg.Sandbox,
UseTmpDir: true,
HandleSegv: true,
@@ -190,9 +192,11 @@ func (opts Options) Serialize() []byte {
}
func DeserializeOptions(data []byte) (Options, error) {
- var opts Options
- // Before CloseFDs was added, close_fds() was always called, so default to true.
- opts.CloseFDs = true
+ opts := Options{
+ Slowdown: 1,
+ // Before CloseFDs was added, close_fds() was always called, so default to true.
+ CloseFDs: true,
+ }
if err := json.Unmarshal(data, &opts); err == nil {
return opts, nil
}
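The struct-literal defaults above are what keep old serialized options working: json.Unmarshal only overwrites fields that are present in the input, so a pre-Slowdown option string deserializes with Slowdown=1 (and, as before, CloseFDs=true). A trimmed-down sketch of that behavior:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // A reduced stand-in for csource.Options, for illustration only.
    type options struct {
        Procs    int  `json:"procs"`
        Slowdown int  `json:"slowdown"`
        CloseFDs bool `json:"close_fds"`
    }

    func main() {
        opts := options{Slowdown: 1, CloseFDs: true} // defaults set first
        old := []byte(`{"procs":8}`)                 // pre-Slowdown serialization
        if err := json.Unmarshal(old, &opts); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", opts) // prints: {Procs:8 Slowdown:1 CloseFDs:true}
    }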
diff --git a/pkg/csource/options_test.go b/pkg/csource/options_test.go
index b9594e341..cd247fada 100644
--- a/pkg/csource/options_test.go
+++ b/pkg/csource/options_test.go
@@ -37,6 +37,7 @@ func TestParseOptionsCanned(t *testing.T) {
Collide: true,
Repeat: true,
Procs: 10,
+ Slowdown: 1,
Sandbox: "namespace",
Fault: true,
FaultCall: 1,
@@ -59,6 +60,7 @@ func TestParseOptionsCanned(t *testing.T) {
Collide: true,
Repeat: true,
Procs: 10,
+ Slowdown: 1,
Sandbox: "android",
Fault: true,
FaultCall: 1,
@@ -78,6 +80,7 @@ func TestParseOptionsCanned(t *testing.T) {
Collide: true,
Repeat: true,
Procs: 1,
+ Slowdown: 1,
Sandbox: "none",
Fault: false,
FaultCall: -1,
@@ -95,6 +98,7 @@ func TestParseOptionsCanned(t *testing.T) {
Collide: true,
Repeat: true,
Procs: 1,
+ Slowdown: 1,
Sandbox: "",
Fault: false,
FaultCall: -1,
@@ -112,6 +116,7 @@ func TestParseOptionsCanned(t *testing.T) {
Collide: true,
Repeat: true,
Procs: 1,
+ Slowdown: 1,
Sandbox: "namespace",
Fault: false,
FaultCall: -1,
@@ -147,6 +152,7 @@ func allOptionsSingle(OS string) []Options {
Repeat: true,
Sandbox: "none",
UseTmpDir: true,
+ Slowdown: 1,
}
opts = append(opts, enumerateField(OS, opt, i)...)
}
@@ -200,6 +206,11 @@ func enumerateField(OS string, opt Options, field int) []Options {
fld.SetInt(times)
opts = append(opts, opt)
}
+ } else if fldName == "Slowdown" {
+ for _, val := range []int64{1, 10} {
+ fld.SetInt(val)
+ opts = append(opts, opt)
+ }
} else if fldName == "FaultCall" {
opts = append(opts, opt)
} else if fldName == "FaultNth" {
diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go
index 861d51e72..52881b353 100644
--- a/pkg/instance/instance.go
+++ b/pkg/instance/instance.go
@@ -21,6 +21,7 @@ import (
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
+ "github.com/google/syzkaller/pkg/tool"
"github.com/google/syzkaller/pkg/vcs"
"github.com/google/syzkaller/sys/targets"
"github.com/google/syzkaller/vm"
@@ -33,7 +34,8 @@ type Env interface {
}
type env struct {
- cfg *mgrconfig.Config
+ cfg *mgrconfig.Config
+ optionalFlags bool
}
func NewEnv(cfg *mgrconfig.Config) (Env, error) {
@@ -53,20 +55,30 @@ func NewEnv(cfg *mgrconfig.Config) (Env, error) {
return nil, fmt.Errorf("failed to create tmp dir: %v", err)
}
env := &env{
- cfg: cfg,
+ cfg: cfg,
+ optionalFlags: true,
}
return env, nil
}
-func (env *env) BuildSyzkaller(repo, commit string) error {
+func (env *env) BuildSyzkaller(repoURL, commit string) error {
cfg := env.cfg
srcIndex := strings.LastIndex(cfg.Syzkaller, "/src/")
if srcIndex == -1 {
return fmt.Errorf("syzkaller path %q is not in GOPATH", cfg.Syzkaller)
}
- if _, err := vcs.NewSyzkallerRepo(cfg.Syzkaller).CheckoutCommit(repo, commit); err != nil {
+ repo := vcs.NewSyzkallerRepo(cfg.Syzkaller)
+ if _, err := repo.CheckoutCommit(repoURL, commit); err != nil {
return fmt.Errorf("failed to checkout syzkaller repo: %v", err)
}
+ // The following commit ("syz-fuzzer: support optional flags") adds support for optional flags
+ // in syz-fuzzer and syz-execprog. This is required to invoke older binaries with newer flags
+ // without failing due to unknown flags.
+ optionalFlags, err := repo.Contains("64435345f0891706a7e0c7885f5f7487581e6005")
+ if err != nil {
+ return fmt.Errorf("optional flags check failed: %v", err)
+ }
+ env.optionalFlags = optionalFlags
cmd := osutil.Command(MakeBin, "target")
cmd.Dir = cfg.Syzkaller
cmd.Env = append([]string{}, os.Environ()...)
@@ -210,13 +222,14 @@ func (env *env) Test(numVMs int, reproSyz, reproOpts, reproC []byte) ([]error, e
res := make(chan error, numVMs)
for i := 0; i < numVMs; i++ {
inst := &inst{
- cfg: env.cfg,
- reporter: reporter,
- vmPool: vmPool,
- vmIndex: i,
- reproSyz: reproSyz,
- reproOpts: reproOpts,
- reproC: reproC,
+ cfg: env.cfg,
+ optionalFlags: env.optionalFlags,
+ reporter: reporter,
+ vmPool: vmPool,
+ vmIndex: i,
+ reproSyz: reproSyz,
+ reproOpts: reproOpts,
+ reproC: reproC,
}
go func() { res <- inst.test() }()
}
@@ -228,14 +241,15 @@ func (env *env) Test(numVMs int, reproSyz, reproOpts, reproC []byte) ([]error, e
}
type inst struct {
- cfg *mgrconfig.Config
- reporter report.Reporter
- vmPool *vm.Pool
- vm *vm.Instance
- vmIndex int
- reproSyz []byte
- reproOpts []byte
- reproC []byte
+ cfg *mgrconfig.Config
+ optionalFlags bool
+ reporter report.Reporter
+ vmPool *vm.Pool
+ vm *vm.Instance
+ vmIndex int
+ reproSyz []byte
+ reproOpts []byte
+ reproC []byte
}
func (inst *inst) test() error {
@@ -318,8 +332,8 @@ func (inst *inst) testInstance() error {
}
cmd := OldFuzzerCmd(fuzzerBin, executorBin, targets.TestOS, inst.cfg.TargetOS, inst.cfg.TargetArch, fwdAddr,
- inst.cfg.Sandbox, 0, inst.cfg.Cover, true)
- outc, errc, err := inst.vm.Run(10*time.Minute, nil, cmd)
+ inst.cfg.Sandbox, 0, inst.cfg.Cover, true, inst.optionalFlags, inst.cfg.Timeouts.Slowdown)
+ outc, errc, err := inst.vm.Run(10*time.Minute*inst.cfg.Timeouts.Scale, nil, cmd)
if err != nil {
return fmt.Errorf("failed to run binary in VM: %v", err)
}
@@ -381,8 +395,9 @@ func (inst *inst) testRepro() error {
opts.FaultCall = -1
}
cmdSyz := ExecprogCmd(execprogBin, executorBin, cfg.TargetOS, cfg.TargetArch, opts.Sandbox,
- true, true, true, cfg.Procs, opts.FaultCall, opts.FaultNth, vmProgFile)
- if err := inst.testProgram(cmdSyz, 7*time.Minute); err != nil {
+ true, true, true, cfg.Procs, opts.FaultCall, opts.FaultNth, inst.optionalFlags,
+ cfg.Timeouts.Slowdown, vmProgFile)
+ if err := inst.testProgram(cmdSyz, cfg.Timeouts.NoOutputRunningTime); err != nil {
return err
}
}
@@ -398,9 +413,9 @@ func (inst *inst) testRepro() error {
if err != nil {
return &TestError{Title: fmt.Sprintf("failed to copy test binary to VM: %v", err)}
}
- // We should test for longer (e.g. 5 mins), but the problem is that
- // reproducer does not print anything, so after 3 mins we detect "no output".
- return inst.testProgram(vmBin, time.Minute)
+ // We should test for more than full "no output" timeout, but the problem is that C reproducers
+ // don't print anything, so we will get a false "no output" crash.
+ return inst.testProgram(vmBin, cfg.Timeouts.NoOutput/2)
}
func (inst *inst) testProgram(command string, testTime time.Duration) error {
@@ -420,7 +435,7 @@ func (inst *inst) testProgram(command string, testTime time.Duration) error {
}
func FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs, verbosity int,
- cover, debug, test, runtest bool) string {
+ cover, debug, test, runtest, optionalFlags bool, slowdown int) string {
osArg := ""
if targets.Get(OS, arch).HostFuzzer {
// Only these OSes need the flag, because the rest assume host OS.
@@ -436,18 +451,26 @@ func FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs,
if verbosity != 0 {
verbosityArg = fmt.Sprintf(" -vv=%v", verbosity)
}
+ optionalArg := ""
+ if optionalFlags {
+ optionalArg = " " + tool.OptionalFlags([]tool.Flag{
+ {Name: "slowdown", Value: fmt.Sprint(slowdown)},
+ })
+ }
return fmt.Sprintf("%v -executor=%v -name=%v -arch=%v%v -manager=%v -sandbox=%v"+
- " -procs=%v -cover=%v -debug=%v -test=%v%v%v",
+ " -procs=%v -cover=%v -debug=%v -test=%v%v%v%v",
fuzzer, executor, name, arch, osArg, fwdAddr, sandbox,
- procs, cover, debug, test, runtestArg, verbosityArg)
+ procs, cover, debug, test, runtestArg, verbosityArg, optionalArg)
}
-func OldFuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs int, cover, test bool) string {
- return FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox, procs, 0, cover, false, test, false)
+func OldFuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs int,
+ cover, test, optionalFlags bool, slowdown int) string {
+ return FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox, procs, 0, cover, false, test, false,
+ optionalFlags, slowdown)
}
func ExecprogCmd(execprog, executor, OS, arch, sandbox string, repeat, threaded, collide bool,
- procs, faultCall, faultNth int, progFile string) string {
+ procs, faultCall, faultNth int, optionalFlags bool, slowdown int, progFile string) string {
repeatCount := 1
if repeat {
repeatCount = 0
@@ -456,12 +479,18 @@ func ExecprogCmd(execprog, executor, OS, arch, sandbox string, repeat, threaded,
if targets.Get(OS, arch).HostFuzzer {
osArg = " -os=" + OS
}
+ optionalArg := ""
+ if optionalFlags {
+ optionalArg = " " + tool.OptionalFlags([]tool.Flag{
+ {Name: "slowdown", Value: fmt.Sprint(slowdown)},
+ })
+ }
return fmt.Sprintf("%v -executor=%v -arch=%v%v -sandbox=%v"+
" -procs=%v -repeat=%v -threaded=%v -collide=%v -cover=0"+
- " -fault_call=%v -fault_nth=%v %v",
+ " -fault_call=%v -fault_nth=%v%v %v",
execprog, executor, arch, osArg, sandbox,
procs, repeatCount, threaded, collide,
- faultCall, faultNth, progFile)
+ faultCall, faultNth, optionalArg, progFile)
}
var MakeBin = func() string {
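Both command builders append -slowdown through tool.OptionalFlags only when the checked-out revision is new enough (see the commit check in BuildSyzkaller above), so older binaries are never handed a flag they would reject. The wire format of pkg/tool's optional flags is not shown in this diff; the sketch below is only a hypothetical stand-in for the idea of one aggregate flag that newer binaries unpack:

    package main

    import (
        "fmt"
        "strings"
    )

    // flagPair and optionalFlags approximate tool.Flag and tool.OptionalFlags;
    // the real encoding (and its escaping) lives in pkg/tool.
    type flagPair struct{ Name, Value string }

    func optionalFlags(flags []flagPair) string {
        parts := make([]string, 0, len(flags))
        for _, f := range flags {
            parts = append(parts, f.Name+"="+f.Value)
        }
        return "-optional=" + strings.Join(parts, ":")
    }

    func main() {
        fmt.Println(optionalFlags([]flagPair{{"slowdown", "10"}}))
        // prints: -optional=slowdown=10
    }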
diff --git a/pkg/instance/instance_test.go b/pkg/instance/instance_test.go
index e19f91faf..d95b6623e 100644
--- a/pkg/instance/instance_test.go
+++ b/pkg/instance/instance_test.go
@@ -10,6 +10,7 @@ import (
"strings"
"testing"
+ "github.com/google/syzkaller/pkg/tool"
"github.com/google/syzkaller/sys/targets"
)
@@ -32,7 +33,7 @@ func TestFuzzerCmd(t *testing.T) {
flagDebug := flags.Bool("debug", false, "debug output from executor")
flagV := flags.Int("v", 0, "verbosity")
cmdLine := OldFuzzerCmd(os.Args[0], "/myexecutor", "myname", targets.Linux, targets.I386, "localhost:1234",
- "namespace", 3, true, true)
+ "namespace", 3, true, true, false, 0)
args := strings.Split(cmdLine, " ")[1:]
if err := flags.Parse(args); err != nil {
t.Fatal(err)
@@ -93,10 +94,11 @@ func TestExecprogCmd(t *testing.T) {
flagCollide := flags.Bool("collide", true, "collide syscalls to provoke data races")
flagSignal := flags.Bool("cover", false, "collect feedback signals (coverage)")
flagSandbox := flags.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace)")
+ flagSlowdown := flags.Int("slowdown", 1, "")
cmdLine := ExecprogCmd(os.Args[0], "/myexecutor", targets.FreeBSD, targets.I386,
- "namespace", true, false, false, 7, 2, 3, "myprog")
+ "namespace", true, false, false, 7, 2, 3, true, 10, "myprog")
args := strings.Split(cmdLine, " ")[1:]
- if err := flags.Parse(args); err != nil {
+ if err := tool.ParseFlags(flags, args); err != nil {
t.Fatal(err)
}
if len(flags.Args()) != 1 || flags.Arg(0) != "myprog" {
@@ -135,4 +137,7 @@ func TestExecprogCmd(t *testing.T) {
if *flagCollide {
t.Errorf("bad collide: %v, want: %v", *flagCollide, false)
}
+ if *flagSlowdown != 10 {
+ t.Errorf("bad slowdown: %v, want: %v", *flagSlowdown, 10)
+ }
}
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
index 0f1bd50a6..7e0bf7e94 100644
--- a/pkg/ipc/ipc.go
+++ b/pkg/ipc/ipc.go
@@ -73,6 +73,8 @@ type Config struct {
// Flags are configuration flags, defined above.
Flags EnvFlags
+
+ Timeouts targets.Timeouts
}
type CallFlags uint32
@@ -154,6 +156,10 @@ func FlagsToSandbox(flags EnvFlags) string {
}
func MakeEnv(config *Config, pid int) (*Env, error) {
+ if config.Timeouts.Slowdown == 0 || config.Timeouts.Scale == 0 ||
+ config.Timeouts.Syscall == 0 || config.Timeouts.Program == 0 {
+ return nil, fmt.Errorf("ipc.MakeEnv: uninitialized timeouts (%+v)", config.Timeouts)
+ }
var inf, outf *os.File
var inmem, outmem []byte
if config.UseShmem {
@@ -731,9 +737,9 @@ func (c *command) exec(opts *ExecOpts, progData []byte) (output []byte, hanged b
pid: uint64(c.pid),
faultCall: uint64(opts.FaultCall),
faultNth: uint64(opts.FaultNth),
- syscallTimeoutMS: 50,
- programTimeoutMS: 5000,
- slowdownScale: 1,
+ syscallTimeoutMS: uint64(c.config.Timeouts.Syscall / time.Millisecond),
+ programTimeoutMS: uint64(c.config.Timeouts.Program / time.Millisecond),
+ slowdownScale: uint64(c.config.Timeouts.Scale),
progSize: uint64(len(progData)),
}
reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:]
diff --git a/pkg/ipc/ipc_test.go b/pkg/ipc/ipc_test.go
index e5e919a89..44fdb67bd 100644
--- a/pkg/ipc/ipc_test.go
+++ b/pkg/ipc/ipc_test.go
@@ -30,7 +30,7 @@ func buildExecutor(t *testing.T, target *prog.Target) string {
return bin
}
-func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool) {
+func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool, targets.Timeouts) {
t.Parallel()
iters := 100
if testing.Short() {
@@ -50,7 +50,7 @@ func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool) {
if err != nil {
t.Fatal(err)
}
- return target, rs, iters, cfg.UseShmem, cfg.UseForkServer
+ return target, rs, iters, cfg.UseShmem, cfg.UseForkServer, cfg.Timeouts
}
// TestExecutor runs all internal executor unit tests.
@@ -82,7 +82,7 @@ func TestExecutor(t *testing.T) {
}
func TestExecute(t *testing.T) {
- target, _, _, useShmem, useForkServer := initTest(t)
+ target, _, _, useShmem, useForkServer, timeouts := initTest(t)
bin := buildExecutor(t, target)
defer os.Remove(bin)
@@ -94,6 +94,7 @@ func TestExecute(t *testing.T) {
Executor: bin,
UseShmem: useShmem,
UseForkServer: useForkServer,
+ Timeouts: timeouts,
}
env, err := MakeEnv(cfg, 0)
if err != nil {
@@ -127,13 +128,14 @@ func TestExecute(t *testing.T) {
}
func TestParallel(t *testing.T) {
- target, _, _, useShmem, useForkServer := initTest(t)
+ target, _, _, useShmem, useForkServer, timeouts := initTest(t)
bin := buildExecutor(t, target)
defer os.Remove(bin)
cfg := &Config{
Executor: bin,
UseShmem: useShmem,
UseForkServer: useForkServer,
+ Timeouts: timeouts,
}
const P = 10
errs := make(chan error, P)
diff --git a/pkg/ipc/ipcconfig/ipcconfig.go b/pkg/ipc/ipcconfig/ipcconfig.go
index a99cbdadc..3791322f2 100644
--- a/pkg/ipc/ipcconfig/ipcconfig.go
+++ b/pkg/ipc/ipcconfig/ipcconfig.go
@@ -18,11 +18,14 @@ var (
flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)")
flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)")
flagDebug = flag.Bool("debug", false, "debug output from executor")
+ flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
)
func Default(target *prog.Target) (*ipc.Config, *ipc.ExecOpts, error) {
+ sysTarget := targets.Get(target.OS, target.Arch)
c := &ipc.Config{
Executor: *flagExecutor,
+ Timeouts: sysTarget.Timeouts(*flagSlowdown),
}
if *flagSignal {
c.Flags |= ipc.FlagSignal
@@ -35,7 +38,6 @@ func Default(target *prog.Target) (*ipc.Config, *ipc.ExecOpts, error) {
return nil, nil, err
}
c.Flags |= sandboxFlags
- sysTarget := targets.Get(target.OS, target.Arch)
c.UseShmem = sysTarget.ExecutorUsesShmem
c.UseForkServer = sysTarget.ExecutorUsesForkServer
opts := &ipc.ExecOpts{
diff --git a/pkg/mgrconfig/load.go b/pkg/mgrconfig/load.go
index a11de44be..9814987a7 100644
--- a/pkg/mgrconfig/load.go
+++ b/pkg/mgrconfig/load.go
@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"regexp"
+ "runtime"
"strings"
"github.com/google/syzkaller/pkg/config"
@@ -34,6 +35,7 @@ type Derived struct {
ExecutorBin string
Syscalls []int
+ Timeouts targets.Timeouts
}
func LoadData(data []byte) (*Config, error) {
@@ -171,9 +173,27 @@ func Complete(cfg *Config) error {
if err != nil {
return err
}
+ cfg.initTimeouts()
return nil
}
+func (cfg *Config) initTimeouts() {
+ slowdown := 1
+ switch {
+ case cfg.Type == "qemu" && runtime.GOARCH != cfg.SysTarget.Arch && runtime.GOARCH != cfg.SysTarget.VMArch:
+ // Assuming qemu emulation.
+ // Quick tests of mmap syscall on arm64 show ~9x slowdown.
+ slowdown = 10
+ case cfg.Type == "gvisor" && cfg.Cover && strings.Contains(cfg.Name, "-race"):
+ // Go coverage+race has insane slowdown of ~350x. We can't afford such large value,
+ // but a smaller value should be enough to finish at least some syscalls.
+ // Note: the name check is a hack.
+ slowdown = 10
+ }
+ // Note: we could also consider heavy debug tools (KASAN/KMSAN/KCSAN/KMEMLEAK) if necessary.
+ cfg.Timeouts = cfg.SysTarget.Timeouts(slowdown)
+}
+
func checkNonEmpty(fields ...string) error {
for i := 0; i < len(fields); i += 2 {
if fields[i] == "" {
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go
index ad0d7c4df..aca892cb9 100644
--- a/pkg/repro/repro.go
+++ b/pkg/repro/repro.go
@@ -49,10 +49,11 @@ type context struct {
crashType report.Type
instances chan *instance
bootRequests chan int
- timeouts []time.Duration
+ testTimeouts []time.Duration
startOpts csource.Options
stats *Stats
report *report.Report
+ timeouts targets.Timeouts
}
type instance struct {
@@ -78,10 +79,11 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report
crashTitle = rep.Title
crashType = rep.Type
}
- // The shortest duration is 10 seconds to detect simple crashes (i.e. no races and no hangs).
- // The longest duration is 6 minutes to catch races and hangs.
- noOutputTimeout := vm.NoOutputTimeout + time.Minute
- timeouts := []time.Duration{15 * time.Second, time.Minute, noOutputTimeout}
+ testTimeouts := []time.Duration{
+ 3 * cfg.Timeouts.Program, // to catch simpler crashes (i.e. no races and no hangs)
+ 20 * cfg.Timeouts.Program,
+ cfg.Timeouts.NoOutputRunningTime, // to catch "no output", races and hangs
+ }
switch {
case crashTitle == "":
crashTitle = "no output/lost connection"
@@ -89,12 +91,12 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report
// but theoretically if it's caused by a race it may need the largest timeout.
// No output can only be reproduced with the max timeout.
// As a compromise we use the smallest and the largest timeouts.
- timeouts = []time.Duration{15 * time.Second, noOutputTimeout}
+ testTimeouts = []time.Duration{testTimeouts[0], testTimeouts[2]}
case crashType == report.MemoryLeak:
// Memory leaks can't be detected quickly because of expensive setup and scanning.
- timeouts = []time.Duration{time.Minute, noOutputTimeout}
+ testTimeouts = testTimeouts[1:]
case crashType == report.Hang:
- timeouts = []time.Duration{noOutputTimeout}
+ testTimeouts = testTimeouts[2:]
}
ctx := &context{
target: cfg.SysTarget,
@@ -103,11 +105,12 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report
crashType: crashType,
instances: make(chan *instance, len(vmIndexes)),
bootRequests: make(chan int, len(vmIndexes)),
- timeouts: timeouts,
+ testTimeouts: testTimeouts,
startOpts: createStartOptions(cfg, features, crashType),
stats: new(Stats),
+ timeouts: cfg.Timeouts,
}
- ctx.reproLogf(0, "%v programs, %v VMs, timeouts %v", len(entries), len(vmIndexes), timeouts)
+ ctx.reproLogf(0, "%v programs, %v VMs, timeouts %v", len(entries), len(vmIndexes), testTimeouts)
var wg sync.WaitGroup
wg.Add(len(vmIndexes))
for _, vmIndex := range vmIndexes {
@@ -308,7 +311,7 @@ func (ctx *context) extractProg(entries []*prog.LogEntry) (*Result, error) {
for i := len(indices) - 1; i >= 0; i-- {
lastEntries = append(lastEntries, entries[indices[i]])
}
- for _, timeout := range ctx.timeouts {
+ for _, timeout := range ctx.testTimeouts {
// Execute each program separately to detect simple crashes caused by a single program.
// Programs are executed in reverse order, usually the last program is the guilty one.
res, err := ctx.extractProgSingle(lastEntries, timeout)
@@ -484,7 +487,7 @@ func (ctx *context) simplifyProg(res *Result) (*Result, error) {
for _, simplify := range progSimplifies {
opts := res.Opts
- if !simplify(&opts) || !checkOpts(&opts, res.Duration) {
+ if !simplify(&opts) || !checkOpts(&opts, ctx.timeouts, res.Duration) {
continue
}
crashed, err := ctx.testProg(res.Prog, res.Duration, opts)
@@ -534,7 +537,7 @@ func (ctx *context) simplifyC(res *Result) (*Result, error) {
for _, simplify := range cSimplifies {
opts := res.Opts
- if !simplify(&opts) || !checkOpts(&opts, res.Duration) {
+ if !simplify(&opts) || !checkOpts(&opts, ctx.timeouts, res.Duration) {
continue
}
crashed, err := ctx.testCProg(res.Prog, res.Duration, opts)
@@ -549,7 +552,7 @@ func (ctx *context) simplifyC(res *Result) (*Result, error) {
return res, nil
}
-func checkOpts(opts *csource.Options, timeout time.Duration) bool {
+func checkOpts(opts *csource.Options, timeouts targets.Timeouts, timeout time.Duration) bool {
if !opts.Repeat && timeout >= time.Minute {
// If we have a non-repeating C reproducer with timeout > vm.NoOutputTimeout and it hangs
// (the reproducer itself does not terminate on its own, note: it does not have builtin timeout),
@@ -621,7 +624,7 @@ func (ctx *context) testProgs(entries []*prog.LogEntry, duration time.Duration,
command := instancePkg.ExecprogCmd(inst.execprogBin, inst.executorBin,
ctx.target.OS, ctx.target.Arch, opts.Sandbox, opts.Repeat,
- opts.Threaded, opts.Collide, opts.Procs, -1, -1, vmProgFile)
+ opts.Threaded, opts.Collide, opts.Procs, -1, -1, true, ctx.timeouts.Slowdown, vmProgFile)
ctx.reproLogf(2, "testing program (duration=%v, %+v): %s", duration, opts, program)
ctx.reproLogf(3, "detailed listing:\n%s", pstr)
return ctx.testImpl(inst.Instance, command, duration)
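For the default Slowdown=1 on Linux (Syscall=50ms, Program=5s, NoOutput=5m, per the sys/targets defaults later in this diff), the testTimeouts ladder above evaluates to {15s, 1m40s, 6m}, replacing the old hard-coded {15s, 1m, NoOutputTimeout+1m}. A quick check of the arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        program := 5 * time.Second                     // Timeouts.Program at Slowdown=1
        noOutputRunning := 5*time.Minute + time.Minute // Timeouts.NoOutputRunningTime
        testTimeouts := []time.Duration{3 * program, 20 * program, noOutputRunning}
        fmt.Println(testTimeouts) // prints: [15s 1m40s 6m0s]
    }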
diff --git a/pkg/rpctype/rpc.go b/pkg/rpctype/rpc.go
index a7900bb13..76c4c3f9c 100644
--- a/pkg/rpctype/rpc.go
+++ b/pkg/rpctype/rpc.go
@@ -43,7 +43,7 @@ func (serv *RPCServer) Serve() {
log.Logf(0, "failed to accept an rpc connection: %v", err)
continue
}
- setupKeepAlive(conn, 10*time.Second)
+ setupKeepAlive(conn, time.Minute)
go serv.s.ServeConn(newFlateConn(conn))
}
}
@@ -53,39 +53,44 @@ func (serv *RPCServer) Addr() net.Addr {
}
type RPCClient struct {
- conn net.Conn
- c *rpc.Client
+ conn net.Conn
+ c *rpc.Client
+ timeScale time.Duration
}
-func Dial(addr string) (net.Conn, error) {
+func Dial(addr string, timeScale time.Duration) (net.Conn, error) {
+ if timeScale <= 0 {
+ return nil, fmt.Errorf("bad rpc time scale %v", timeScale)
+ }
var conn net.Conn
var err error
if addr == "stdin" {
// This is used by vm/gvisor which passes us a unix socket connection in stdin.
return net.FileConn(os.Stdin)
}
- if conn, err = net.DialTimeout("tcp", addr, 60*time.Second); err != nil {
+ if conn, err = net.DialTimeout("tcp", addr, time.Minute*timeScale); err != nil {
return nil, err
}
- setupKeepAlive(conn, time.Minute)
+ setupKeepAlive(conn, time.Minute*timeScale)
return conn, nil
}
-func NewRPCClient(addr string) (*RPCClient, error) {
- conn, err := Dial(addr)
+func NewRPCClient(addr string, timeScale time.Duration) (*RPCClient, error) {
+ conn, err := Dial(addr, timeScale)
if err != nil {
return nil, err
}
cli := &RPCClient{
- conn: conn,
- c: rpc.NewClient(newFlateConn(conn)),
+ conn: conn,
+ c: rpc.NewClient(newFlateConn(conn)),
+ timeScale: timeScale,
}
return cli, nil
}
func (cli *RPCClient) Call(method string, args, reply interface{}) error {
// Note: SetDeadline is not implemented on fuchsia, so don't fail on error.
- cli.conn.SetDeadline(time.Now().Add(5 * 60 * time.Second))
+ cli.conn.SetDeadline(time.Now().Add(3 * time.Minute * cli.timeScale))
defer cli.conn.SetDeadline(time.Time{})
return cli.c.Call(method, args, reply)
}
@@ -94,8 +99,8 @@ func (cli *RPCClient) Close() {
cli.c.Close()
}
-func RPCCall(addr, method string, args, reply interface{}) error {
- c, err := NewRPCClient(addr)
+func RPCCall(addr string, timeScale time.Duration, method string, args, reply interface{}) error {
+ c, err := NewRPCClient(addr, timeScale)
if err != nil {
return err
}
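Every RPC deadline is now multiplied by the same Timeouts.Scale: syz-fuzzer passes config.Timeouts.Scale, while hub connections pass 1 since hub RPCs are unaffected by VM slowdown. The resulting deadlines, computed as in Dial and Call above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        for _, timeScale := range []time.Duration{1, 3} {
            dial := time.Minute * timeScale     // dial timeout and keep-alive period
            call := 3 * time.Minute * timeScale // per-Call deadline
            fmt.Printf("scale=%d dial/keep-alive=%v call deadline=%v\n",
                int(timeScale), dial, call)
        }
        // prints:
        // scale=1 dial/keep-alive=1m0s call deadline=3m0s
        // scale=3 dial/keep-alive=3m0s call deadline=9m0s
    }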
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index 8366b8fb3..4d81e2b60 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -376,6 +376,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
opts := new(ipc.ExecOpts)
cfg.UseShmem = sysTarget.ExecutorUsesShmem
cfg.UseForkServer = sysTarget.ExecutorUsesForkServer
+ cfg.Timeouts = sysTarget.Timeouts(1)
sandboxFlags, err := ipc.SandboxToFlags(sandbox)
if err != nil {
return nil, err
@@ -427,6 +428,7 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
Repeat: times > 1,
RepeatTimes: times,
Procs: 1,
+ Slowdown: 1,
Sandbox: sandbox,
UseTmpDir: true,
HandleSegv: true,
diff --git a/sys/linux/init.go b/sys/linux/init.go
index a18fa99c4..15d6f7960 100644
--- a/sys/linux/init.go
+++ b/sys/linux/init.go
@@ -280,6 +280,7 @@ func (arch *arch) generateTimespec(g *prog.Gen, typ0 prog.Type, dir prog.Dir, ol
// Note: timespec/timeval can be absolute or relative to now.
// Note: executor has blocking syscall timeout of 45 ms,
// so we generate both 10ms and 60ms.
+ // TODO(dvyukov): this is now all outdated with tunable timeouts.
const (
timeout1 = uint64(10)
timeout2 = uint64(60)
diff --git a/sys/targets/targets.go b/sys/targets/targets.go
index 73a6a8b11..7102d6f92 100644
--- a/sys/targets/targets.go
+++ b/sys/targets/targets.go
@@ -11,6 +11,7 @@ import (
"runtime"
"strings"
"sync"
+ "time"
)
type Target struct {
@@ -41,7 +42,8 @@ type Target struct {
init *sync.Once
initOther *sync.Once
// Target for the other compiler. If SYZ_CLANG says to use gcc, this will be clang. Or the other way around.
- other *Target
+ other *Target
+ timeouts Timeouts
}
type osCommon struct {
@@ -78,6 +80,39 @@ type osCommon struct {
cflags []string
}
+// Timeouts structure parametrizes timeouts throughout the system.
+// It allows supporting different operating systems, architectures, and execution environments
+// (emulation, models, etc) without scattering and duplicating knowledge about their execution
+// performance everywhere.
+// Timeouts calculation consists of 2 parts: base values and scaling.
+// Base timeout values consist of a single syscall timeout, program timeout and "no output" timeout
+// and are specified by the target (OS/arch), or defaults are used.
+// Scaling part is calculated from the execution environment in pkg/mgrconfig based on VM type,
+// kernel build type, emulation, etc. Scaling is deliberately collapsed into a single number so that
+// it can be specified/overridden for command line tools (e.g. syz-execprog -slowdown=10).
+type Timeouts struct {
+ // Base scaling factor, used only for a single syscall timeout.
+ Slowdown int
+ // Capped scaling factor used for timeouts other than syscall timeout.
+ // It's already applied to all values in this struct, but can be used for one-off timeout values
+ // in the system. This should also be applied to syscall/program timeout attributes in syscall descriptions.
+ // Derived from Slowdown and should not be greater than Slowdown.
+ // The idea behind capping is that slowdown can be large (10-20) and most timeouts already
+ // include some safety margin. If we just multiply them we will get too large timeouts,
+ // e.g. program timeout can become 5s*20 = 100s, or "no output" timeout: 5m*20 = 100m.
+ Scale time.Duration
+ // Timeout for a single syscall, after this time the syscall is considered "blocked".
+ Syscall time.Duration
+ // Timeout for a single program execution.
+ Program time.Duration
+ // Timeout for "no output" detection.
+ NoOutput time.Duration
+ // Limit on a single VM running time, after this time a VM is restarted.
+ VMRunningTime time.Duration
+ // How long we should test to get "no output" error (derivative of NoOutput, here to avoid duplication).
+ NoOutputRunningTime time.Duration
+}
+
const (
Akaros = "akaros"
FreeBSD = "freebsd"
@@ -634,6 +669,45 @@ func initTarget(target *Target, OS, arch string) {
}
}
+func (target *Target) Timeouts(slowdown int) Timeouts {
+ if slowdown <= 0 {
+ panic(fmt.Sprintf("bad slowdown %v", slowdown))
+ }
+ timeouts := target.timeouts
+ timeouts.Slowdown = slowdown
+ timeouts.Scale = time.Duration(slowdown)
+ if timeouts.Scale > 3 {
+ timeouts.Scale = 3
+ }
+ if timeouts.Syscall == 0 {
+ timeouts.Syscall = 50 * time.Millisecond
+ }
+ if timeouts.Program == 0 {
+ timeouts.Program = 5 * time.Second
+ }
+ if timeouts.NoOutput == 0 {
+ // The timeout used to be 3 mins for a long time.
+ // But (1) we were seeing flakes on linux where net namespace
+ // destruction can be really slow, and (2) gVisor watchdog timeout
+ // is 3 mins + 1/4 of that for checking period = 3m45s.
+ // Current linux max timeout is CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140
+ // and workqueue.watchdog_thresh=140 which both actually result
+ // in 140-280s detection delay.
+ // So the current timeout is 5 mins (300s).
+ // We don't want it to be too long too because it will waste time on real hangs.
+ timeouts.NoOutput = 5 * time.Minute
+ }
+ if timeouts.VMRunningTime == 0 {
+ timeouts.VMRunningTime = time.Hour
+ }
+ timeouts.Syscall *= time.Duration(slowdown)
+ timeouts.Program *= timeouts.Scale
+ timeouts.NoOutput *= timeouts.Scale
+ timeouts.VMRunningTime *= timeouts.Scale
+ timeouts.NoOutputRunningTime = timeouts.NoOutput + time.Minute
+ return timeouts
+}
+
func (target *Target) setCompiler(clang bool) {
// setCompiler may be called effectively twice for target.other,
// so first we remove flags the previous call may have added.
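Putting the pieces together: a cross-arch qemu config, which initTimeouts in pkg/mgrconfig assigns slowdown=10, scales the single-syscall timeout by the full slowdown but everything else by the capped Scale=3. A self-contained check of the numbers, assuming the defaults in Target.Timeouts above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        slowdown := 10 // e.g. qemu emulating a foreign arch
        scale := time.Duration(slowdown)
        if scale > 3 {
            scale = 3 // capped, as in Target.Timeouts
        }
        syscall := 50 * time.Millisecond * time.Duration(slowdown)
        program := 5 * time.Second * scale
        noOutput := 5 * time.Minute * scale
        vmRunning := time.Hour * scale
        fmt.Println(syscall, program, noOutput, vmRunning, noOutput+time.Minute)
        // prints: 500ms 15s 15m0s 3h0m0s 16m0s
    }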
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
index 36c13adc4..404141c78 100644
--- a/syz-fuzzer/fuzzer.go
+++ b/syz-fuzzer/fuzzer.go
@@ -27,6 +27,7 @@ import (
"github.com/google/syzkaller/pkg/tool"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
+ "github.com/google/syzkaller/sys/targets"
)
type Fuzzer struct {
@@ -43,6 +44,7 @@ type Fuzzer struct {
manager *rpctype.RPCClient
target *prog.Target
triagedCandidates uint32
+ timeouts targets.Timeouts
faultInjectionEnabled bool
comparisonTracingEnabled bool
@@ -152,6 +154,7 @@ func main() {
if err != nil {
log.Fatalf("failed to create default ipc config: %v", err)
}
+ timeouts := config.Timeouts
sandbox := ipc.FlagsToSandbox(config.Flags)
shutdown := make(chan struct{})
osutil.HandleInterrupts(shutdown)
@@ -179,7 +182,7 @@ func main() {
}
log.Logf(0, "dialing manager at %v", *flagManager)
- manager, err := rpctype.NewRPCClient(*flagManager)
+ manager, err := rpctype.NewRPCClient(*flagManager, timeouts.Scale)
if err != nil {
log.Fatalf("failed to connect to manager: %v ", err)
}
@@ -243,6 +246,7 @@ func main() {
needPoll: needPoll,
manager: manager,
target: target,
+ timeouts: timeouts,
faultInjectionEnabled: r.CheckResult.Features[host.FeatureFault].Enabled,
comparisonTracingEnabled: r.CheckResult.Features[host.FeatureComparisons].Enabled,
corpusHashes: make(map[hash.Sig]struct{}),
@@ -300,7 +304,8 @@ func (fuzzer *Fuzzer) gateCallback(leakFrames []string) {
return
}
args := append([]string{"leak"}, leakFrames...)
- output, err := osutil.RunCmd(10*time.Minute, "", fuzzer.config.Executor, args...)
+ timeout := fuzzer.timeouts.NoOutput * 9 / 10
+ output, err := osutil.RunCmd(timeout, "", fuzzer.config.Executor, args...)
if err != nil && triagedCandidates == 2 {
// If we exit right away, dying executors will dump lots of garbage to console.
os.Stdout.Write(output)
@@ -315,7 +320,8 @@ func (fuzzer *Fuzzer) gateCallback(leakFrames []string) {
func (fuzzer *Fuzzer) filterDataRaceFrames(frames []string) {
args := append([]string{"setup_kcsan_filterlist"}, frames...)
- output, err := osutil.RunCmd(10*time.Minute, "", fuzzer.config.Executor, args...)
+ timeout := time.Minute * fuzzer.timeouts.Scale
+ output, err := osutil.RunCmd(timeout, "", fuzzer.config.Executor, args...)
if err != nil {
log.Fatalf("failed to set KCSAN filterlist: %v", err)
}
@@ -326,7 +332,7 @@ func (fuzzer *Fuzzer) pollLoop() {
var execTotal uint64
var lastPoll time.Time
var lastPrint time.Time
- ticker := time.NewTicker(3 * time.Second).C
+ ticker := time.NewTicker(3 * time.Second * fuzzer.timeouts.Scale).C
for {
poll := false
select {
@@ -334,12 +340,12 @@ func (fuzzer *Fuzzer) pollLoop() {
case <-fuzzer.needPoll:
poll = true
}
- if fuzzer.outputType != OutputStdout && time.Since(lastPrint) > 10*time.Second {
+ if fuzzer.outputType != OutputStdout && time.Since(lastPrint) > 10*time.Second*fuzzer.timeouts.Scale {
// Keep-alive for manager.
log.Logf(0, "alive, executed %v", execTotal)
lastPrint = time.Now()
}
- if poll || time.Since(lastPoll) > 10*time.Second {
+ if poll || time.Since(lastPoll) > 10*time.Second*fuzzer.timeouts.Scale {
needCandidates := fuzzer.workQueue.wantCandidates()
if poll && !needCandidates {
continue
diff --git a/syz-fuzzer/testing.go b/syz-fuzzer/testing.go
index c003cc1e9..a8d119d92 100644
--- a/syz-fuzzer/testing.go
+++ b/syz-fuzzer/testing.go
@@ -33,7 +33,7 @@ type checkArgs struct {
func testImage(hostAddr string, args *checkArgs) {
log.Logf(0, "connecting to host at %v", hostAddr)
- conn, err := rpctype.Dial(hostAddr)
+ conn, err := rpctype.Dial(hostAddr, args.ipcConfig.Timeouts.Scale)
if err != nil {
log.Fatalf("BUG: failed to connect to host: %v", err)
}
diff --git a/syz-manager/hub.go b/syz-manager/hub.go
index d3f6a4bd4..f42c71206 100644
--- a/syz-manager/hub.go
+++ b/syz-manager/hub.go
@@ -101,10 +101,10 @@ func (hc *HubConnector) connect(corpus [][]byte) (*rpctype.RPCClient, error) {
}
// Hub.Connect request can be very large, so do it on a transient connection
// (rpc connection buffers never shrink).
- if err := rpctype.RPCCall(hc.cfg.HubAddr, "Hub.Connect", a, nil); err != nil {
+ if err := rpctype.RPCCall(hc.cfg.HubAddr, 1, "Hub.Connect", a, nil); err != nil {
return nil, err
}
- hub, err := rpctype.NewRPCClient(hc.cfg.HubAddr)
+ hub, err := rpctype.NewRPCClient(hc.cfg.HubAddr, 1)
if err != nil {
return nil, err
}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 18691c399..783f0425e 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -293,7 +293,7 @@ func (mgr *Manager) vmLoop() {
go func() {
for i := 0; i < vmCount; i++ {
bootInstance <- i
- time.Sleep(10 * time.Second)
+ time.Sleep(10 * time.Second * mgr.cfg.Timeouts.Scale)
}
}()
var instances []int
@@ -629,8 +629,8 @@ func (mgr *Manager) runInstanceInner(index int, instanceName string) (*report.Re
cmd := instance.FuzzerCmd(fuzzerBin, executorBin, instanceName,
mgr.cfg.TargetOS, mgr.cfg.TargetArch, fwdAddr, mgr.cfg.Sandbox, procs, fuzzerV,
- mgr.cfg.Cover, *flagDebug, false, false)
- outc, errc, err := inst.Run(time.Hour, mgr.vmStop, cmd)
+ mgr.cfg.Cover, *flagDebug, false, false, true, mgr.cfg.Timeouts.Slowdown)
+ outc, errc, err := inst.Run(mgr.cfg.Timeouts.VMRunningTime, mgr.vmStop, cmd)
if err != nil {
return nil, fmt.Errorf("failed to run fuzzer: %v", err)
}
diff --git a/tools/syz-crush/crush.go b/tools/syz-crush/crush.go
index 004ebd3fa..5b2dcfffd 100644
--- a/tools/syz-crush/crush.go
+++ b/tools/syz-crush/crush.go
@@ -23,14 +23,13 @@ import (
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
- "github.com/google/syzkaller/prog"
"github.com/google/syzkaller/vm"
)
var (
flagConfig = flag.String("config", "", "manager configuration file")
flagDebug = flag.Bool("debug", false, "dump all VM output to console")
- flagRestartTime = flag.Duration("restart_time", time.Hour, "how long to run the test")
+ flagRestartTime = flag.Duration("restart_time", 0, "how long to run the test")
flagInfinite = flag.Bool("infinite", true, "by default test is run for ever, -infinite=false to stop on crash")
)
@@ -52,7 +51,9 @@ func main() {
if err != nil {
log.Fatal(err)
}
-
+ if *flagRestartTime == 0 {
+ *flagRestartTime = cfg.Timeouts.VMRunningTime
+ }
if *flagInfinite {
log.Printf("running infinitely and restarting VM every %v", *flagRestartTime)
} else {
@@ -101,7 +102,7 @@ func main() {
for i := 0; i < vmPool.Count(); i++ {
go func(index int) {
for {
- runDone <- runInstance(cfg.Target, cfg, reporter, vmPool, index, *flagRestartTime, runType)
+ runDone <- runInstance(cfg, reporter, vmPool, index, *flagRestartTime, runType)
if atomic.LoadUint32(&shutdown) != 0 || !*flagInfinite {
// If this is the last worker then we can close the channel.
if atomic.AddUint32(&stoppedWorkers, 1) == uint32(vmPool.Count()) {
@@ -165,7 +166,7 @@ func storeCrash(cfg *mgrconfig.Config, rep *report.Report) {
}
}
-func runInstance(target *prog.Target, cfg *mgrconfig.Config, reporter report.Reporter,
+func runInstance(cfg *mgrconfig.Config, reporter report.Reporter,
vmPool *vm.Pool, index int, timeout time.Duration, runType FileType) *report.Report {
log.Printf("vm-%v: starting", index)
inst, err := vmPool.Create(index)
@@ -200,7 +201,7 @@ func runInstance(target *prog.Target, cfg *mgrconfig.Config, reporter report.Rep
}
cmd = instance.ExecprogCmd(execprogBin, executorBin, cfg.TargetOS, cfg.TargetArch, cfg.Sandbox,
- true, true, true, cfg.Procs, -1, -1, logFile)
+ true, true, true, cfg.Procs, -1, -1, true, cfg.Timeouts.Slowdown, logFile)
} else {
cmd = execprogBin
}
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index c24a515ca..50879e6f6 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -283,8 +283,7 @@ func loadPrograms(target *prog.Target, files []string) []*prog.LogEntry {
return entries
}
-func createConfig(target *prog.Target,
- features *host.Features, featuresFlags csource.Features) (
+func createConfig(target *prog.Target, features *host.Features, featuresFlags csource.Features) (
*ipc.Config, *ipc.ExecOpts) {
config, execOpts, err := ipcconfig.Default(target)
if err != nil {
diff --git a/tools/syz-hubtool/hubtool.go b/tools/syz-hubtool/hubtool.go
index bf5f6c20b..491191a4c 100644
--- a/tools/syz-hubtool/hubtool.go
+++ b/tools/syz-hubtool/hubtool.go
@@ -51,7 +51,7 @@ func main() {
return
}
log.Printf("connecting to hub at %v...", *flagHubAddress)
- conn, err := rpctype.NewRPCClient(*flagHubAddress)
+ conn, err := rpctype.NewRPCClient(*flagHubAddress, 1)
if err != nil {
log.Fatalf("failed to connect to hub: %v", err)
}
diff --git a/tools/syz-runtest/runtest.go b/tools/syz-runtest/runtest.go
index 55e07d58c..8ef15af38 100644
--- a/tools/syz-runtest/runtest.go
+++ b/tools/syz-runtest/runtest.go
@@ -175,7 +175,7 @@ func (mgr *Manager) boot(name string, index int) (*report.Report, error) {
}
cmd := instance.FuzzerCmd(fuzzerBin, executorBin, name,
mgr.cfg.TargetOS, mgr.cfg.TargetArch, fwdAddr, mgr.cfg.Sandbox, mgr.cfg.Procs, 0,
- mgr.cfg.Cover, mgr.debug, false, true)
+ mgr.cfg.Cover, mgr.debug, false, true, true, mgr.cfg.Timeouts.Slowdown)
outc, errc, err := inst.Run(time.Hour, mgr.vmStop, cmd)
if err != nil {
return nil, fmt.Errorf("failed to run fuzzer: %v", err)
diff --git a/vm/vm.go b/vm/vm.go
index f09ee38b7..94f072545 100644
--- a/vm/vm.go
+++ b/vm/vm.go
@@ -18,6 +18,7 @@ import (
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
+ "github.com/google/syzkaller/sys/targets"
"github.com/google/syzkaller/vm/vmimpl"
// Import all VM implementations, so that users only need to import vm.
@@ -37,12 +38,14 @@ type Pool struct {
impl vmimpl.Pool
workdir string
template string
+ timeouts targets.Timeouts
}
type Instance struct {
- impl vmimpl.Instance
- workdir string
- index int
+ impl vmimpl.Instance
+ workdir string
+ timeouts targets.Timeouts
+ index int
}
var (
@@ -92,6 +95,7 @@ func Create(cfg *mgrconfig.Config, debug bool) (*Pool, error) {
impl: impl,
workdir: env.Workdir,
template: cfg.WorkdirTemplate,
+ timeouts: cfg.Timeouts,
}, nil
}
@@ -118,9 +122,10 @@ func (pool *Pool) Create(index int) (*Instance, error) {
return nil, err
}
return &Instance{
- impl: impl,
- workdir: workdir,
- index: index,
+ impl: impl,
+ workdir: workdir,
+ timeouts: pool.timeouts,
+ index: index,
}, nil
}
@@ -175,7 +180,7 @@ func (inst *Instance) MonitorExecution(outc <-chan []byte, errc <-chan error,
exit: exit,
}
lastExecuteTime := time.Now()
- ticker := time.NewTicker(tickerPeriod)
+ ticker := time.NewTicker(tickerPeriod * inst.timeouts.Scale)
defer ticker.Stop()
for {
select {
@@ -240,16 +245,7 @@ func (inst *Instance) MonitorExecution(outc <-chan []byte, errc <-chan error,
case <-ticker.C:
// Detect both "no output whatsoever" and "kernel episodically prints
// something to console, but fuzzer is not actually executing programs".
- // The timeout used to be 3 mins for a long time.
- // But (1) we were seeing flakes on linux where net namespace
- // destruction can be really slow, and (2) gVisor watchdog timeout
- // is 3 mins + 1/4 of that for checking period = 3m45s.
- // Current linux max timeout is CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140
- // and workqueue.watchdog_thresh=140 which both actually result
- // in 140-280s detection delay.
- // So the current timeout is 5 mins (300s).
- // We don't want it to be too long too because it will waste time on real hangs.
- if time.Since(lastExecuteTime) > NoOutputTimeout {
+ if time.Since(lastExecuteTime) > inst.timeouts.NoOutput {
return mon.extractError(noOutputCrash)
}
case <-Shutdown:
@@ -360,7 +356,6 @@ var (
beforeContext = 1024 << 10
afterContext = 128 << 10
- NoOutputTimeout = 5 * time.Minute
tickerPeriod = 10 * time.Second
waitForOutputTimeout = 10 * time.Second
)
diff --git a/vm/vm_test.go b/vm/vm_test.go
index da5d2b7af..ec87aeee3 100644
--- a/vm/vm_test.go
+++ b/vm/vm_test.go
@@ -73,7 +73,6 @@ func (inst *testInstance) Close() {
func init() {
beforeContext = maxErrorLength + 100
tickerPeriod = 1 * time.Second
- NoOutputTimeout = 5 * time.Second
waitForOutputTimeout = 3 * time.Second
ctor := func(env *vmimpl.Env) (vmimpl.Pool, error) {
@@ -344,6 +343,11 @@ func testMonitorExecution(t *testing.T, test *Test) {
TargetOS: targets.Linux,
TargetArch: targets.AMD64,
TargetVMArch: targets.AMD64,
+ Timeouts: targets.Timeouts{
+ Scale: 1,
+ Slowdown: 1,
+ NoOutput: 5 * time.Second,
+ },
},
Workdir: dir,
Type: "test",