diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2021-12-01 17:25:40 +0000 |
|---|---|---|
| committer | Aleksandr Nogikh <wp32pw@gmail.com> | 2021-12-10 12:30:07 +0100 |
| commit | 18f846ca807cfc6df9c3da3c0ab08251277dfefb (patch) | |
| tree | e14f783b914409f21ae77a01a6b74ededaba6901 | |
| parent | 52c8379f77b5f292e2d527c66dfe17a899381d20 (diff) | |
all: add the `rerun` call property
To be able to collide specific syscalls more precisely, we need to
re-execute those syscalls many times.
Introduce the `rerun` call property, which instructs `syz-executor` to
repeat the call the specified number of times. The intended use is:
call1() (rerun: 100, async)
call2() (rerun: 100)
For now, assign rerun values randomly to consecutive pairs of calls,
where the first one is async.
| -rw-r--r-- | executor/executor.cc | 9 | ||||
| -rw-r--r-- | pkg/csource/csource.go | 8 | ||||
| -rw-r--r-- | pkg/csource/csource_test.go | 5 | ||||
| -rw-r--r-- | prog/collide.go | 16 | ||||
| -rw-r--r-- | prog/encoding_test.go | 8 | ||||
| -rw-r--r-- | prog/encodingexec_test.go | 14 | ||||
| -rw-r--r-- | prog/minimization.go | 9 | ||||
| -rw-r--r-- | prog/minimization_test.go | 23 | ||||
| -rw-r--r-- | prog/prog.go | 1 | ||||
| -rw-r--r-- | syz-fuzzer/proc.go | 6 |
10 files changed, 88 insertions, 11 deletions
diff --git a/executor/executor.cc b/executor/executor.cc index 01b19b81e..feb47c814 100644 --- a/executor/executor.cc +++ b/executor/executor.cc @@ -1244,6 +1244,8 @@ void execute_call(thread_t* th) int fail_fd = -1; th->soft_fail_state = false; if (th->call_props.fail_nth > 0) { + if (th->call_props.rerun > 0) + fail("both fault injection and rerun are enabled for the same call"); fail_fd = inject_fault(th->call_props.fail_nth); th->soft_fail_state = true; } @@ -1272,12 +1274,19 @@ void execute_call(thread_t* th) if (th->call_props.fail_nth > 0) th->fault_injected = fault_injected(fail_fd); + // If required, run the syscall some more times. + // But let's still return res, errno and coverage from the first execution. + for (int i = 0; i < th->call_props.rerun; i++) + NONFAILING(execute_syscall(call, th->args)); + debug("#%d [%llums] <- %s=0x%llx errno=%d ", th->id, current_time_ms() - start_time_ms, call->name, (uint64)th->res, th->reserrno); if (flag_coverage) debug("cover=%u ", th->cov.size); if (th->call_props.fail_nth > 0) debug("fault=%d ", th->fault_injected); + if (th->call_props.rerun > 0) + debug("rerun=%d ", th->call_props.rerun); debug("\n"); } diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go index 32e01ff1e..6dd8bdcd6 100644 --- a/pkg/csource/csource.go +++ b/pkg/csource/csource.go @@ -253,6 +253,14 @@ func (ctx *context) generateCalls(p prog.ExecProg, trace bool) ([]string, []uint ctx.emitCall(w, call, ci, resCopyout || argCopyout, trace) + if call.Props.Rerun > 0 { + // TODO: remove this legacy C89-style definition once we figure out what to do with Akaros. + fmt.Fprintf(w, "\t{\n\tint i;\n") + fmt.Fprintf(w, "\tfor(i = 0; i < %v; i++) {\n", call.Props.Rerun) + // Rerun invocations should not affect the result value. + ctx.emitCall(w, call, ci, false, false) + fmt.Fprintf(w, "\t\t}\n\t}\n") + } // Copyout. 
if resCopyout || argCopyout { ctx.copyout(w, call, resCopyout) diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go index 885d75f67..fc3b21573 100644 --- a/pkg/csource/csource_test.go +++ b/pkg/csource/csource_test.go @@ -70,13 +70,16 @@ func testTarget(t *testing.T, target *prog.Target, full bool) { p.Calls = append(p.Calls, minimized.Calls...) opts = allOptionsPermutations(target.OS) } - // Test fault injection and async call generation as well. + // Test various call properties. if len(p.Calls) > 0 { p.Calls[0].Props.FailNth = 1 } if len(p.Calls) > 1 { p.Calls[1].Props.Async = true } + if len(p.Calls) > 2 { + p.Calls[2].Props.Rerun = 4 + } for opti, opts := range opts { if testing.Short() && opts.HandleSegv { // HandleSegv can radically increase compilation time/memory consumption on large programs. diff --git a/prog/collide.go b/prog/collide.go index cd059c60f..77065147f 100644 --- a/prog/collide.go +++ b/prog/collide.go @@ -55,3 +55,19 @@ func AssignRandomAsync(origProg *Prog, rand *rand.Rand) *Prog { return prog } + +var rerunSteps = []int{32, 64} + +func AssignRandomRerun(prog *Prog, rand *rand.Rand) { + for i := 0; i+1 < len(prog.Calls); i++ { + if !prog.Calls[i].Props.Async || rand.Intn(4) != 0 { + continue + } + // We assign rerun to consecutive pairs of calls, where the first call is async. + // TODO: consider assigning rerun also to non-collided progs. 
+ rerun := rerunSteps[rand.Intn(len(rerunSteps))] + prog.Calls[i].Props.Rerun = rerun + prog.Calls[i+1].Props.Rerun = rerun + i++ + } +} diff --git a/prog/encoding_test.go b/prog/encoding_test.go index e0e6fbefd..4717c402b 100644 --- a/prog/encoding_test.go +++ b/prog/encoding_test.go @@ -421,7 +421,7 @@ func TestSerializeCallProps(t *testing.T) { }, { "serialize0(0x0) (fail_nth: 5)\n", - []CallProps{{5, false}}, + []CallProps{{5, false, 0}}, }, { "serialize0(0x0) (fail_nth)\n", @@ -433,7 +433,11 @@ func TestSerializeCallProps(t *testing.T) { }, { "serialize0(0x0) (async)\n", - []CallProps{{0, true}}, + []CallProps{{0, true, 0}}, + }, + { + "serialize0(0x0) (async, rerun: 10)\n", + []CallProps{{0, true, 10}}, }, } diff --git a/prog/encodingexec_test.go b/prog/encodingexec_test.go index fe6a4dfb4..879aed893 100644 --- a/prog/encodingexec_test.go +++ b/prog/encodingexec_test.go @@ -465,14 +465,14 @@ func TestSerializeForExec(t *testing.T) { { `test() (fail_nth: 3) test() (fail_nth: 4) -test() (async) +test() (async, rerun: 10) `, []uint64{ - execInstrSetProps, 3, 0, + execInstrSetProps, 3, 0, 0, callID("test"), ExecNoCopyout, 0, - execInstrSetProps, 4, 0, + execInstrSetProps, 4, 0, 0, callID("test"), ExecNoCopyout, 0, - execInstrSetProps, 0, 1, + execInstrSetProps, 0, 1, 10, callID("test"), ExecNoCopyout, 0, execInstrEOF, }, @@ -481,17 +481,17 @@ test() (async) { Meta: target.SyscallMap["test"], Index: ExecNoCopyout, - Props: CallProps{3, false}, + Props: CallProps{3, false, 0}, }, { Meta: target.SyscallMap["test"], Index: ExecNoCopyout, - Props: CallProps{4, false}, + Props: CallProps{4, false, 0}, }, { Meta: target.SyscallMap["test"], Index: ExecNoCopyout, - Props: CallProps{0, true}, + Props: CallProps{0, true, 10}, }, }, }, diff --git a/prog/minimization.go b/prog/minimization.go index 89ed6e142..26a4dfc93 100644 --- a/prog/minimization.go +++ b/prog/minimization.go @@ -120,6 +120,15 @@ func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, 
int } } + // Try to drop rerun. + if props.Rerun > 0 { + p := p0.Clone() + p.Calls[callIndex].Props.Rerun = 0 + if pred(p, callIndex0) { + p0 = p + } + } + return p0 } diff --git a/prog/minimization_test.go b/prog/minimization_test.go index cf499b7f1..697937e92 100644 --- a/prog/minimization_test.go +++ b/prog/minimization_test.go @@ -8,6 +8,7 @@ import ( "testing" ) +// nolint:gocyclo func TestMinimize(t *testing.T) { tests := []struct { os string @@ -193,6 +194,28 @@ func TestMinimize(t *testing.T) { "pipe2(0x0, 0x0) (async)\n", -1, }, + // Clear unneeded rerun. + { + "linux", "amd64", + "pipe2(0x0, 0x0) (rerun: 100)\n", + -1, + func(p *Prog, callIndex int) bool { + return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" + }, + "pipe2(0x0, 0x0)\n", + -1, + }, + // Keep important rerun. + { + "linux", "amd64", + "pipe2(0x0, 0x0) (rerun: 100)\n", + -1, + func(p *Prog, callIndex int) bool { + return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" && p.Calls[0].Props.Rerun >= 100 + }, + "pipe2(0x0, 0x0) (rerun: 100)\n", + -1, + }, } t.Parallel() for ti, test := range tests { diff --git a/prog/prog.go b/prog/prog.go index 09da7fdf2..86e424299 100644 --- a/prog/prog.go +++ b/prog/prog.go @@ -21,6 +21,7 @@ type Prog struct { type CallProps struct { FailNth int `key:"fail_nth"` Async bool `key:"async"` + Rerun int `key:"rerun"` } type Call struct { diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go index a232327de..55d718f88 100644 --- a/syz-fuzzer/proc.go +++ b/syz-fuzzer/proc.go @@ -288,7 +288,11 @@ func (proc *Proc) executeAndCollide(execOpts *ipc.ExecOpts, p *prog.Prog, flags } func (proc *Proc) randomCollide(origP *prog.Prog) *prog.Prog { - return prog.AssignRandomAsync(origP, proc.rnd) + p := prog.AssignRandomAsync(origP, proc.rnd) + if proc.rnd.Intn(2) != 0 { + prog.AssignRandomRerun(p, proc.rnd) + } + return p } func (proc *Proc) executeRaw(opts *ipc.ExecOpts, p *prog.Prog, stat Stat) *ipc.ProgInfo { |
