diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2020-11-25 09:17:50 +0100 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2020-12-28 14:22:41 +0100 |
| commit | cbd0445ec3b0b184db66966d8a47e6b37d13692e (patch) | |
| tree | 14ed47723c325ef1b388e4e732a70c0fed4fa101 /pkg | |
| parent | 2242f77fdc5a6c50bd8fa1021d2abc8b83e09e8d (diff) | |
all: make timeouts configurable
Add sys/targets.Timeouts struct that parametrizes timeouts throughout the system.
The struct allows controlling syscall/program/no-output timeouts per OS/arch/VM/etc.
See comment on the struct for more details.
Diffstat (limited to 'pkg')
| -rw-r--r-- | pkg/csource/csource.go | 8 | ||||
| -rw-r--r-- | pkg/csource/csource_test.go | 1 | ||||
| -rw-r--r-- | pkg/csource/options.go | 10 | ||||
| -rw-r--r-- | pkg/csource/options_test.go | 11 | ||||
| -rw-r--r-- | pkg/instance/instance.go | 97 | ||||
| -rw-r--r-- | pkg/instance/instance_test.go | 11 | ||||
| -rw-r--r-- | pkg/ipc/ipc.go | 12 | ||||
| -rw-r--r-- | pkg/ipc/ipc_test.go | 10 | ||||
| -rw-r--r-- | pkg/ipc/ipcconfig/ipcconfig.go | 4 | ||||
| -rw-r--r-- | pkg/mgrconfig/load.go | 20 | ||||
| -rw-r--r-- | pkg/repro/repro.go | 33 | ||||
| -rw-r--r-- | pkg/rpctype/rpc.go | 31 | ||||
| -rw-r--r-- | pkg/runtest/run.go | 2 |
13 files changed, 171 insertions, 79 deletions
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go index b53b3b97f..a01141567 100644 --- a/pkg/csource/csource.go +++ b/pkg/csource/csource.go @@ -29,6 +29,7 @@ import ( "regexp" "sort" "strings" + "time" "github.com/google/syzkaller/prog" "github.com/google/syzkaller/sys/targets" @@ -93,11 +94,12 @@ func Write(p *prog.Prog, opts Options) ([]byte, error) { replacements["SANDBOX_FUNC"] = replacements["SYSCALLS"] replacements["SYSCALLS"] = "unused" } - replacements["PROGRAM_TIMEOUT_MS"] = "5000" - timeoutExpr := "45" + timeouts := ctx.sysTarget.Timeouts(opts.Slowdown) + replacements["PROGRAM_TIMEOUT_MS"] = fmt.Sprint(int(timeouts.Program / time.Millisecond)) + timeoutExpr := fmt.Sprint(int(timeouts.Syscall / time.Millisecond)) for i, call := range p.Calls { if timeout := call.Meta.Attrs.Timeout; timeout != 0 { - timeoutExpr += fmt.Sprintf(" + (call == %d ? %d : 0)", i, timeout) + timeoutExpr += fmt.Sprintf(" + (call == %v ? %v : 0)", i, timeout*uint64(timeouts.Scale)) } } replacements["CALL_TIMEOUT_MS"] = timeoutExpr diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go index f6d04b35c..951f9f09a 100644 --- a/pkg/csource/csource_test.go +++ b/pkg/csource/csource_test.go @@ -53,6 +53,7 @@ var executorOpts = Options{ Collide: true, Repeat: true, Procs: 2, + Slowdown: 1, Sandbox: "none", Repro: true, UseTmpDir: true, diff --git a/pkg/csource/options.go b/pkg/csource/options.go index ebea29fc2..3c1790483 100644 --- a/pkg/csource/options.go +++ b/pkg/csource/options.go @@ -23,6 +23,7 @@ type Options struct { Repeat bool `json:"repeat,omitempty"` RepeatTimes int `json:"repeat_times,omitempty"` // if non-0, repeat that many times Procs int `json:"procs"` + Slowdown int `json:"slowdown"` Sandbox string `json:"sandbox"` Fault bool `json:"fault,omitempty"` // inject fault into FaultCall/FaultNth @@ -154,6 +155,7 @@ func DefaultOpts(cfg *mgrconfig.Config) Options { Collide: true, Repeat: true, Procs: cfg.Procs, + Slowdown: cfg.Timeouts.Slowdown, 
Sandbox: cfg.Sandbox, UseTmpDir: true, HandleSegv: true, @@ -190,9 +192,11 @@ func (opts Options) Serialize() []byte { } func DeserializeOptions(data []byte) (Options, error) { - var opts Options - // Before CloseFDs was added, close_fds() was always called, so default to true. - opts.CloseFDs = true + opts := Options{ + Slowdown: 1, + // Before CloseFDs was added, close_fds() was always called, so default to true. + CloseFDs: true, + } if err := json.Unmarshal(data, &opts); err == nil { return opts, nil } diff --git a/pkg/csource/options_test.go b/pkg/csource/options_test.go index b9594e341..cd247fada 100644 --- a/pkg/csource/options_test.go +++ b/pkg/csource/options_test.go @@ -37,6 +37,7 @@ func TestParseOptionsCanned(t *testing.T) { Collide: true, Repeat: true, Procs: 10, + Slowdown: 1, Sandbox: "namespace", Fault: true, FaultCall: 1, @@ -59,6 +60,7 @@ func TestParseOptionsCanned(t *testing.T) { Collide: true, Repeat: true, Procs: 10, + Slowdown: 1, Sandbox: "android", Fault: true, FaultCall: 1, @@ -78,6 +80,7 @@ func TestParseOptionsCanned(t *testing.T) { Collide: true, Repeat: true, Procs: 1, + Slowdown: 1, Sandbox: "none", Fault: false, FaultCall: -1, @@ -95,6 +98,7 @@ func TestParseOptionsCanned(t *testing.T) { Collide: true, Repeat: true, Procs: 1, + Slowdown: 1, Sandbox: "", Fault: false, FaultCall: -1, @@ -112,6 +116,7 @@ func TestParseOptionsCanned(t *testing.T) { Collide: true, Repeat: true, Procs: 1, + Slowdown: 1, Sandbox: "namespace", Fault: false, FaultCall: -1, @@ -147,6 +152,7 @@ func allOptionsSingle(OS string) []Options { Repeat: true, Sandbox: "none", UseTmpDir: true, + Slowdown: 1, } opts = append(opts, enumerateField(OS, opt, i)...) 
} @@ -200,6 +206,11 @@ func enumerateField(OS string, opt Options, field int) []Options { fld.SetInt(times) opts = append(opts, opt) } + } else if fldName == "Slowdown" { + for _, val := range []int64{1, 10} { + fld.SetInt(val) + opts = append(opts, opt) + } } else if fldName == "FaultCall" { opts = append(opts, opt) } else if fldName == "FaultNth" { diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go index 861d51e72..52881b353 100644 --- a/pkg/instance/instance.go +++ b/pkg/instance/instance.go @@ -21,6 +21,7 @@ import ( "github.com/google/syzkaller/pkg/mgrconfig" "github.com/google/syzkaller/pkg/osutil" "github.com/google/syzkaller/pkg/report" + "github.com/google/syzkaller/pkg/tool" "github.com/google/syzkaller/pkg/vcs" "github.com/google/syzkaller/sys/targets" "github.com/google/syzkaller/vm" @@ -33,7 +34,8 @@ type Env interface { } type env struct { - cfg *mgrconfig.Config + cfg *mgrconfig.Config + optionalFlags bool } func NewEnv(cfg *mgrconfig.Config) (Env, error) { @@ -53,20 +55,30 @@ func NewEnv(cfg *mgrconfig.Config) (Env, error) { return nil, fmt.Errorf("failed to create tmp dir: %v", err) } env := &env{ - cfg: cfg, + cfg: cfg, + optionalFlags: true, } return env, nil } -func (env *env) BuildSyzkaller(repo, commit string) error { +func (env *env) BuildSyzkaller(repoURL, commit string) error { cfg := env.cfg srcIndex := strings.LastIndex(cfg.Syzkaller, "/src/") if srcIndex == -1 { return fmt.Errorf("syzkaller path %q is not in GOPATH", cfg.Syzkaller) } - if _, err := vcs.NewSyzkallerRepo(cfg.Syzkaller).CheckoutCommit(repo, commit); err != nil { + repo := vcs.NewSyzkallerRepo(cfg.Syzkaller) + if _, err := repo.CheckoutCommit(repoURL, commit); err != nil { return fmt.Errorf("failed to checkout syzkaller repo: %v", err) } + // The following commit ("syz-fuzzer: support optional flags") adds support for optional flags + // in syz-fuzzer and syz-execprog. 
This is required to invoke older binaries with newer flags + // without failing due to unknown flags. + optionalFlags, err := repo.Contains("64435345f0891706a7e0c7885f5f7487581e6005") + if err != nil { + return fmt.Errorf("optional flags check failed: %v", err) + } + env.optionalFlags = optionalFlags cmd := osutil.Command(MakeBin, "target") cmd.Dir = cfg.Syzkaller cmd.Env = append([]string{}, os.Environ()...) @@ -210,13 +222,14 @@ func (env *env) Test(numVMs int, reproSyz, reproOpts, reproC []byte) ([]error, e res := make(chan error, numVMs) for i := 0; i < numVMs; i++ { inst := &inst{ - cfg: env.cfg, - reporter: reporter, - vmPool: vmPool, - vmIndex: i, - reproSyz: reproSyz, - reproOpts: reproOpts, - reproC: reproC, + cfg: env.cfg, + optionalFlags: env.optionalFlags, + reporter: reporter, + vmPool: vmPool, + vmIndex: i, + reproSyz: reproSyz, + reproOpts: reproOpts, + reproC: reproC, } go func() { res <- inst.test() }() } @@ -228,14 +241,15 @@ func (env *env) Test(numVMs int, reproSyz, reproOpts, reproC []byte) ([]error, e } type inst struct { - cfg *mgrconfig.Config - reporter report.Reporter - vmPool *vm.Pool - vm *vm.Instance - vmIndex int - reproSyz []byte - reproOpts []byte - reproC []byte + cfg *mgrconfig.Config + optionalFlags bool + reporter report.Reporter + vmPool *vm.Pool + vm *vm.Instance + vmIndex int + reproSyz []byte + reproOpts []byte + reproC []byte } func (inst *inst) test() error { @@ -318,8 +332,8 @@ func (inst *inst) testInstance() error { } cmd := OldFuzzerCmd(fuzzerBin, executorBin, targets.TestOS, inst.cfg.TargetOS, inst.cfg.TargetArch, fwdAddr, - inst.cfg.Sandbox, 0, inst.cfg.Cover, true) - outc, errc, err := inst.vm.Run(10*time.Minute, nil, cmd) + inst.cfg.Sandbox, 0, inst.cfg.Cover, true, inst.optionalFlags, inst.cfg.Timeouts.Slowdown) + outc, errc, err := inst.vm.Run(10*time.Minute*inst.cfg.Timeouts.Scale, nil, cmd) if err != nil { return fmt.Errorf("failed to run binary in VM: %v", err) } @@ -381,8 +395,9 @@ func (inst *inst) 
testRepro() error { opts.FaultCall = -1 } cmdSyz := ExecprogCmd(execprogBin, executorBin, cfg.TargetOS, cfg.TargetArch, opts.Sandbox, - true, true, true, cfg.Procs, opts.FaultCall, opts.FaultNth, vmProgFile) - if err := inst.testProgram(cmdSyz, 7*time.Minute); err != nil { + true, true, true, cfg.Procs, opts.FaultCall, opts.FaultNth, inst.optionalFlags, + cfg.Timeouts.Slowdown, vmProgFile) + if err := inst.testProgram(cmdSyz, cfg.Timeouts.NoOutputRunningTime); err != nil { return err } } @@ -398,9 +413,9 @@ func (inst *inst) testRepro() error { if err != nil { return &TestError{Title: fmt.Sprintf("failed to copy test binary to VM: %v", err)} } - // We should test for longer (e.g. 5 mins), but the problem is that - // reproducer does not print anything, so after 3 mins we detect "no output". - return inst.testProgram(vmBin, time.Minute) + // We should test for more than full "no output" timeout, but the problem is that C reproducers + // don't print anything, so we will get a false "no output" crash. + return inst.testProgram(vmBin, cfg.Timeouts.NoOutput/2) } func (inst *inst) testProgram(command string, testTime time.Duration) error { @@ -420,7 +435,7 @@ func (inst *inst) testProgram(command string, testTime time.Duration) error { } func FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs, verbosity int, - cover, debug, test, runtest bool) string { + cover, debug, test, runtest, optionalFlags bool, slowdown int) string { osArg := "" if targets.Get(OS, arch).HostFuzzer { // Only these OSes need the flag, because the rest assume host OS. 
@@ -436,18 +451,26 @@ func FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs, if verbosity != 0 { verbosityArg = fmt.Sprintf(" -vv=%v", verbosity) } + optionalArg := "" + if optionalFlags { + optionalArg = " " + tool.OptionalFlags([]tool.Flag{ + {Name: "slowdown", Value: fmt.Sprint(slowdown)}, + }) + } return fmt.Sprintf("%v -executor=%v -name=%v -arch=%v%v -manager=%v -sandbox=%v"+ - " -procs=%v -cover=%v -debug=%v -test=%v%v%v", + " -procs=%v -cover=%v -debug=%v -test=%v%v%v%v", fuzzer, executor, name, arch, osArg, fwdAddr, sandbox, - procs, cover, debug, test, runtestArg, verbosityArg) + procs, cover, debug, test, runtestArg, verbosityArg, optionalArg) } -func OldFuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs int, cover, test bool) string { - return FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox, procs, 0, cover, false, test, false) +func OldFuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox string, procs int, + cover, test, optionalFlags bool, slowdown int) string { + return FuzzerCmd(fuzzer, executor, name, OS, arch, fwdAddr, sandbox, procs, 0, cover, false, test, false, + optionalFlags, slowdown) } func ExecprogCmd(execprog, executor, OS, arch, sandbox string, repeat, threaded, collide bool, - procs, faultCall, faultNth int, progFile string) string { + procs, faultCall, faultNth int, optionalFlags bool, slowdown int, progFile string) string { repeatCount := 1 if repeat { repeatCount = 0 @@ -456,12 +479,18 @@ func ExecprogCmd(execprog, executor, OS, arch, sandbox string, repeat, threaded, if targets.Get(OS, arch).HostFuzzer { osArg = " -os=" + OS } + optionalArg := "" + if optionalFlags { + optionalArg = " " + tool.OptionalFlags([]tool.Flag{ + {Name: "slowdown", Value: fmt.Sprint(slowdown)}, + }) + } return fmt.Sprintf("%v -executor=%v -arch=%v%v -sandbox=%v"+ " -procs=%v -repeat=%v -threaded=%v -collide=%v -cover=0"+ - " -fault_call=%v -fault_nth=%v %v", + " -fault_call=%v 
-fault_nth=%v%v %v", execprog, executor, arch, osArg, sandbox, procs, repeatCount, threaded, collide, - faultCall, faultNth, progFile) + faultCall, faultNth, optionalArg, progFile) } var MakeBin = func() string { diff --git a/pkg/instance/instance_test.go b/pkg/instance/instance_test.go index e19f91faf..d95b6623e 100644 --- a/pkg/instance/instance_test.go +++ b/pkg/instance/instance_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/google/syzkaller/pkg/tool" "github.com/google/syzkaller/sys/targets" ) @@ -32,7 +33,7 @@ func TestFuzzerCmd(t *testing.T) { flagDebug := flags.Bool("debug", false, "debug output from executor") flagV := flags.Int("v", 0, "verbosity") cmdLine := OldFuzzerCmd(os.Args[0], "/myexecutor", "myname", targets.Linux, targets.I386, "localhost:1234", - "namespace", 3, true, true) + "namespace", 3, true, true, false, 0) args := strings.Split(cmdLine, " ")[1:] if err := flags.Parse(args); err != nil { t.Fatal(err) @@ -93,10 +94,11 @@ func TestExecprogCmd(t *testing.T) { flagCollide := flags.Bool("collide", true, "collide syscalls to provoke data races") flagSignal := flags.Bool("cover", false, "collect feedback signals (coverage)") flagSandbox := flags.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace)") + flagSlowdown := flags.Int("slowdown", 1, "") cmdLine := ExecprogCmd(os.Args[0], "/myexecutor", targets.FreeBSD, targets.I386, - "namespace", true, false, false, 7, 2, 3, "myprog") + "namespace", true, false, false, 7, 2, 3, true, 10, "myprog") args := strings.Split(cmdLine, " ")[1:] - if err := flags.Parse(args); err != nil { + if err := tool.ParseFlags(flags, args); err != nil { t.Fatal(err) } if len(flags.Args()) != 1 || flags.Arg(0) != "myprog" { @@ -135,4 +137,7 @@ func TestExecprogCmd(t *testing.T) { if *flagCollide { t.Errorf("bad collide: %v, want: %v", *flagCollide, false) } + if *flagSlowdown != 10 { + t.Errorf("bad slowdown: %v, want: %v", *flagSlowdown, 10) + } } diff --git a/pkg/ipc/ipc.go 
b/pkg/ipc/ipc.go index 0f1bd50a6..7e0bf7e94 100644 --- a/pkg/ipc/ipc.go +++ b/pkg/ipc/ipc.go @@ -73,6 +73,8 @@ type Config struct { // Flags are configuation flags, defined above. Flags EnvFlags + + Timeouts targets.Timeouts } type CallFlags uint32 @@ -154,6 +156,10 @@ func FlagsToSandbox(flags EnvFlags) string { } func MakeEnv(config *Config, pid int) (*Env, error) { + if config.Timeouts.Slowdown == 0 || config.Timeouts.Scale == 0 || + config.Timeouts.Syscall == 0 || config.Timeouts.Program == 0 { + return nil, fmt.Errorf("ipc.MakeEnv: uninitialized timeouts (%+v)", config.Timeouts) + } var inf, outf *os.File var inmem, outmem []byte if config.UseShmem { @@ -731,9 +737,9 @@ func (c *command) exec(opts *ExecOpts, progData []byte) (output []byte, hanged b pid: uint64(c.pid), faultCall: uint64(opts.FaultCall), faultNth: uint64(opts.FaultNth), - syscallTimeoutMS: 50, - programTimeoutMS: 5000, - slowdownScale: 1, + syscallTimeoutMS: uint64(c.config.Timeouts.Syscall / time.Millisecond), + programTimeoutMS: uint64(c.config.Timeouts.Program / time.Millisecond), + slowdownScale: uint64(c.config.Timeouts.Scale), progSize: uint64(len(progData)), } reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:] diff --git a/pkg/ipc/ipc_test.go b/pkg/ipc/ipc_test.go index e5e919a89..44fdb67bd 100644 --- a/pkg/ipc/ipc_test.go +++ b/pkg/ipc/ipc_test.go @@ -30,7 +30,7 @@ func buildExecutor(t *testing.T, target *prog.Target) string { return bin } -func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool) { +func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool, targets.Timeouts) { t.Parallel() iters := 100 if testing.Short() { @@ -50,7 +50,7 @@ func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, bool) { if err != nil { t.Fatal(err) } - return target, rs, iters, cfg.UseShmem, cfg.UseForkServer + return target, rs, iters, cfg.UseShmem, cfg.UseForkServer, cfg.Timeouts } // TestExecutor runs all internal executor unit tests. 
@@ -82,7 +82,7 @@ func TestExecutor(t *testing.T) { } func TestExecute(t *testing.T) { - target, _, _, useShmem, useForkServer := initTest(t) + target, _, _, useShmem, useForkServer, timeouts := initTest(t) bin := buildExecutor(t, target) defer os.Remove(bin) @@ -94,6 +94,7 @@ func TestExecute(t *testing.T) { Executor: bin, UseShmem: useShmem, UseForkServer: useForkServer, + Timeouts: timeouts, } env, err := MakeEnv(cfg, 0) if err != nil { @@ -127,13 +128,14 @@ func TestExecute(t *testing.T) { } func TestParallel(t *testing.T) { - target, _, _, useShmem, useForkServer := initTest(t) + target, _, _, useShmem, useForkServer, timeouts := initTest(t) bin := buildExecutor(t, target) defer os.Remove(bin) cfg := &Config{ Executor: bin, UseShmem: useShmem, UseForkServer: useForkServer, + Timeouts: timeouts, } const P = 10 errs := make(chan error, P) diff --git a/pkg/ipc/ipcconfig/ipcconfig.go b/pkg/ipc/ipcconfig/ipcconfig.go index a99cbdadc..3791322f2 100644 --- a/pkg/ipc/ipcconfig/ipcconfig.go +++ b/pkg/ipc/ipcconfig/ipcconfig.go @@ -18,11 +18,14 @@ var ( flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)") flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)") flagDebug = flag.Bool("debug", false, "debug output from executor") + flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation") ) func Default(target *prog.Target) (*ipc.Config, *ipc.ExecOpts, error) { + sysTarget := targets.Get(target.OS, target.Arch) c := &ipc.Config{ Executor: *flagExecutor, + Timeouts: sysTarget.Timeouts(*flagSlowdown), } if *flagSignal { c.Flags |= ipc.FlagSignal @@ -35,7 +38,6 @@ func Default(target *prog.Target) (*ipc.Config, *ipc.ExecOpts, error) { return nil, nil, err } c.Flags |= sandboxFlags - sysTarget := targets.Get(target.OS, target.Arch) c.UseShmem = sysTarget.ExecutorUsesShmem c.UseForkServer = sysTarget.ExecutorUsesForkServer opts := &ipc.ExecOpts{ diff --git 
a/pkg/mgrconfig/load.go b/pkg/mgrconfig/load.go index a11de44be..9814987a7 100644 --- a/pkg/mgrconfig/load.go +++ b/pkg/mgrconfig/load.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "regexp" + "runtime" "strings" "github.com/google/syzkaller/pkg/config" @@ -34,6 +35,7 @@ type Derived struct { ExecutorBin string Syscalls []int + Timeouts targets.Timeouts } func LoadData(data []byte) (*Config, error) { @@ -171,9 +173,27 @@ func Complete(cfg *Config) error { if err != nil { return err } + cfg.initTimeouts() return nil } +func (cfg *Config) initTimeouts() { + slowdown := 1 + switch { + case cfg.Type == "qemu" && runtime.GOARCH != cfg.SysTarget.Arch && runtime.GOARCH != cfg.SysTarget.VMArch: + // Assuming qemu emulation. + // Quick tests of mmap syscall on arm64 show ~9x slowdown. + slowdown = 10 + case cfg.Type == "gvisor" && cfg.Cover && strings.Contains(cfg.Name, "-race"): + // Go coverage+race has insane slowdown of ~350x. We can't afford such large value, + // but a smaller value should be enough to finish at least some syscalls. + // Note: the name check is a hack. + slowdown = 10 + } + // Note: we could also consider heavy debug tools (KASAN/KMSAN/KCSAN/KMEMLEAK) if necessary. + cfg.Timeouts = cfg.SysTarget.Timeouts(slowdown) +} + func checkNonEmpty(fields ...string) error { for i := 0; i < len(fields); i += 2 { if fields[i] == "" { diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go index ad0d7c4df..aca892cb9 100644 --- a/pkg/repro/repro.go +++ b/pkg/repro/repro.go @@ -49,10 +49,11 @@ type context struct { crashType report.Type instances chan *instance bootRequests chan int - timeouts []time.Duration + testTimeouts []time.Duration startOpts csource.Options stats *Stats report *report.Report + timeouts targets.Timeouts } type instance struct { @@ -78,10 +79,11 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report crashTitle = rep.Title crashType = rep.Type } - // The shortest duration is 10 seconds to detect simple crashes (i.e. 
no races and no hangs). - // The longest duration is 6 minutes to catch races and hangs. - noOutputTimeout := vm.NoOutputTimeout + time.Minute - timeouts := []time.Duration{15 * time.Second, time.Minute, noOutputTimeout} + testTimeouts := []time.Duration{ + 3 * cfg.Timeouts.Program, // to catch simpler crashes (i.e. no races and no hangs) + 20 * cfg.Timeouts.Program, + cfg.Timeouts.NoOutputRunningTime, // to catch "no output", races and hangs + } switch { case crashTitle == "": crashTitle = "no output/lost connection" @@ -89,12 +91,12 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report // but theoretically if it's caused by a race it may need the largest timeout. // No output can only be reproduced with the max timeout. // As a compromise we use the smallest and the largest timeouts. - timeouts = []time.Duration{15 * time.Second, noOutputTimeout} + testTimeouts = []time.Duration{testTimeouts[0], testTimeouts[2]} case crashType == report.MemoryLeak: // Memory leaks can't be detected quickly because of expensive setup and scanning. 
- timeouts = []time.Duration{time.Minute, noOutputTimeout} + testTimeouts = testTimeouts[1:] case crashType == report.Hang: - timeouts = []time.Duration{noOutputTimeout} + testTimeouts = testTimeouts[2:] } ctx := &context{ target: cfg.SysTarget, @@ -103,11 +105,12 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report crashType: crashType, instances: make(chan *instance, len(vmIndexes)), bootRequests: make(chan int, len(vmIndexes)), - timeouts: timeouts, + testTimeouts: testTimeouts, startOpts: createStartOptions(cfg, features, crashType), stats: new(Stats), + timeouts: cfg.Timeouts, } - ctx.reproLogf(0, "%v programs, %v VMs, timeouts %v", len(entries), len(vmIndexes), timeouts) + ctx.reproLogf(0, "%v programs, %v VMs, timeouts %v", len(entries), len(vmIndexes), testTimeouts) var wg sync.WaitGroup wg.Add(len(vmIndexes)) for _, vmIndex := range vmIndexes { @@ -308,7 +311,7 @@ func (ctx *context) extractProg(entries []*prog.LogEntry) (*Result, error) { for i := len(indices) - 1; i >= 0; i-- { lastEntries = append(lastEntries, entries[indices[i]]) } - for _, timeout := range ctx.timeouts { + for _, timeout := range ctx.testTimeouts { // Execute each program separately to detect simple crashes caused by a single program. // Programs are executed in reverse order, usually the last program is the guilty one. 
res, err := ctx.extractProgSingle(lastEntries, timeout) @@ -484,7 +487,7 @@ func (ctx *context) simplifyProg(res *Result) (*Result, error) { for _, simplify := range progSimplifies { opts := res.Opts - if !simplify(&opts) || !checkOpts(&opts, res.Duration) { + if !simplify(&opts) || !checkOpts(&opts, ctx.timeouts, res.Duration) { continue } crashed, err := ctx.testProg(res.Prog, res.Duration, opts) @@ -534,7 +537,7 @@ func (ctx *context) simplifyC(res *Result) (*Result, error) { for _, simplify := range cSimplifies { opts := res.Opts - if !simplify(&opts) || !checkOpts(&opts, res.Duration) { + if !simplify(&opts) || !checkOpts(&opts, ctx.timeouts, res.Duration) { continue } crashed, err := ctx.testCProg(res.Prog, res.Duration, opts) @@ -549,7 +552,7 @@ func (ctx *context) simplifyC(res *Result) (*Result, error) { return res, nil } -func checkOpts(opts *csource.Options, timeout time.Duration) bool { +func checkOpts(opts *csource.Options, timeouts targets.Timeouts, timeout time.Duration) bool { if !opts.Repeat && timeout >= time.Minute { // If we have a non-repeating C reproducer with timeout > vm.NoOutputTimeout and it hangs // (the reproducer itself does not terminate on its own, note: it does not have builtin timeout), @@ -621,7 +624,7 @@ func (ctx *context) testProgs(entries []*prog.LogEntry, duration time.Duration, command := instancePkg.ExecprogCmd(inst.execprogBin, inst.executorBin, ctx.target.OS, ctx.target.Arch, opts.Sandbox, opts.Repeat, - opts.Threaded, opts.Collide, opts.Procs, -1, -1, vmProgFile) + opts.Threaded, opts.Collide, opts.Procs, -1, -1, true, ctx.timeouts.Slowdown, vmProgFile) ctx.reproLogf(2, "testing program (duration=%v, %+v): %s", duration, opts, program) ctx.reproLogf(3, "detailed listing:\n%s", pstr) return ctx.testImpl(inst.Instance, command, duration) diff --git a/pkg/rpctype/rpc.go b/pkg/rpctype/rpc.go index a7900bb13..76c4c3f9c 100644 --- a/pkg/rpctype/rpc.go +++ b/pkg/rpctype/rpc.go @@ -43,7 +43,7 @@ func (serv *RPCServer) Serve() { 
log.Logf(0, "failed to accept an rpc connection: %v", err) continue } - setupKeepAlive(conn, 10*time.Second) + setupKeepAlive(conn, time.Minute) go serv.s.ServeConn(newFlateConn(conn)) } } @@ -53,39 +53,44 @@ func (serv *RPCServer) Addr() net.Addr { } type RPCClient struct { - conn net.Conn - c *rpc.Client + conn net.Conn + c *rpc.Client + timeScale time.Duration } -func Dial(addr string) (net.Conn, error) { +func Dial(addr string, timeScale time.Duration) (net.Conn, error) { + if timeScale <= 0 { + return nil, fmt.Errorf("bad rpc time scale %v", timeScale) + } var conn net.Conn var err error if addr == "stdin" { // This is used by vm/gvisor which passes us a unix socket connection in stdin. return net.FileConn(os.Stdin) } - if conn, err = net.DialTimeout("tcp", addr, 60*time.Second); err != nil { + if conn, err = net.DialTimeout("tcp", addr, time.Minute*timeScale); err != nil { return nil, err } - setupKeepAlive(conn, time.Minute) + setupKeepAlive(conn, time.Minute*timeScale) return conn, nil } -func NewRPCClient(addr string) (*RPCClient, error) { - conn, err := Dial(addr) +func NewRPCClient(addr string, timeScale time.Duration) (*RPCClient, error) { + conn, err := Dial(addr, timeScale) if err != nil { return nil, err } cli := &RPCClient{ - conn: conn, - c: rpc.NewClient(newFlateConn(conn)), + conn: conn, + c: rpc.NewClient(newFlateConn(conn)), + timeScale: timeScale, } return cli, nil } func (cli *RPCClient) Call(method string, args, reply interface{}) error { // Note: SetDeadline is not implemented on fuchsia, so don't fail on error. 
- cli.conn.SetDeadline(time.Now().Add(5 * 60 * time.Second)) + cli.conn.SetDeadline(time.Now().Add(3 * time.Minute * cli.timeScale)) defer cli.conn.SetDeadline(time.Time{}) return cli.c.Call(method, args, reply) } @@ -94,8 +99,8 @@ func (cli *RPCClient) Close() { cli.c.Close() } -func RPCCall(addr, method string, args, reply interface{}) error { - c, err := NewRPCClient(addr) +func RPCCall(addr string, timeScale time.Duration, method string, args, reply interface{}) error { + c, err := NewRPCClient(addr, timeScale) if err != nil { return err } diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go index 8366b8fb3..4d81e2b60 100644 --- a/pkg/runtest/run.go +++ b/pkg/runtest/run.go @@ -376,6 +376,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo opts := new(ipc.ExecOpts) cfg.UseShmem = sysTarget.ExecutorUsesShmem cfg.UseForkServer = sysTarget.ExecutorUsesForkServer + cfg.Timeouts = sysTarget.Timeouts(1) sandboxFlags, err := ipc.SandboxToFlags(sandbox) if err != nil { return nil, err @@ -427,6 +428,7 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim Repeat: times > 1, RepeatTimes: times, Procs: 1, + Slowdown: 1, Sandbox: sandbox, UseTmpDir: true, HandleSegv: true, |
