diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2021-09-23 16:15:41 +0000 |
|---|---|---|
| committer | Aleksandr Nogikh <wp32pw@gmail.com> | 2021-12-10 12:30:07 +0100 |
| commit | fd8caa5462e64f37cb9eebd75ffca1737dde447d (patch) | |
| tree | bfa900ebf41099b21476e72acdf063ee630178c9 /prog | |
| parent | 4d4ce9bc2a12073dcc8b917f9fc2a4ecba26c4c5 (diff) | |
all: replace collide mode by `async` call property
Replace the currently existing straightforward approach to race triggering
(that was almost entirely implemented inside syz-executor) with a more
flexible one.
The `async` call property instructs syz-executor not to block until the
call has completed execution, but to proceed immediately to the next call.
The decision on which calls to mark with `async` is made by syz-fuzzer.
Ultimately this should let us implement more intelligent race-provoking
strategies as well as make more fine-grained reproducers.
Diffstat (limited to 'prog')
| -rw-r--r-- | prog/analysis.go | 29 | ||||
| -rw-r--r-- | prog/collide.go | 57 | ||||
| -rw-r--r-- | prog/collide_test.go | 84 | ||||
| -rw-r--r-- | prog/decodeexec.go | 4 | ||||
| -rw-r--r-- | prog/encoding.go | 3 | ||||
| -rw-r--r-- | prog/encoding_test.go | 6 | ||||
| -rw-r--r-- | prog/encodingexec.go | 8 | ||||
| -rw-r--r-- | prog/encodingexec_test.go | 16 | ||||
| -rw-r--r-- | prog/minimization.go | 30 | ||||
| -rw-r--r-- | prog/minimization_test.go | 22 | ||||
| -rw-r--r-- | prog/parse_test.go | 4 | ||||
| -rw-r--r-- | prog/prog.go | 3 |
12 files changed, 245 insertions, 21 deletions
diff --git a/prog/analysis.go b/prog/analysis.go index 6643941ff..697e1eab5 100644 --- a/prog/analysis.go +++ b/prog/analysis.go @@ -160,29 +160,34 @@ func foreachArgImpl(arg Arg, ctx *ArgCtx, f func(Arg, *ArgCtx)) { } } -func RequiredFeatures(p *Prog) (bitmasks, csums bool) { +type RequiredFeatures struct { + Bitmasks bool + Csums bool + FaultInjection bool + Async bool +} + +func (p *Prog) RequiredFeatures() RequiredFeatures { + features := RequiredFeatures{} for _, c := range p.Calls { ForeachArg(c, func(arg Arg, _ *ArgCtx) { if a, ok := arg.(*ConstArg); ok { if a.Type().BitfieldOffset() != 0 || a.Type().BitfieldLength() != 0 { - bitmasks = true + features.Bitmasks = true } } if _, ok := arg.Type().(*CsumType); ok { - csums = true + features.Csums = true } }) - } - return -} - -func (p *Prog) HasFaultInjection() bool { - for _, call := range p.Calls { - if call.Props.FailNth > 0 { - return true + if c.Props.FailNth > 0 { + features.FaultInjection = true + } + if c.Props.Async { + features.Async = true } } - return false + return features } type CallFlags int diff --git a/prog/collide.go b/prog/collide.go new file mode 100644 index 000000000..cd059c60f --- /dev/null +++ b/prog/collide.go @@ -0,0 +1,57 @@ +// Copyright 2021 syzkaller project authors. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. + +// Contains prog transformations that intend to trigger more races. + +package prog + +import "math/rand" + +// The executor has no more than 32 threads that are used both for async calls and for calls +// that timed out. If we just ignore that limit, we could end up generating programs that +// would force the executor to fail and thus stall the fuzzing process. +// As an educated guess, let's use no more than 24 async calls to let executor handle everything. 
+const maxAsyncPerProg = 24 + +// Ensures that if an async call produces a resource, then +// it is distanced from a call consuming the resource at least +// by one non-async call. +// This does not give 100% guarantee that the async call finishes +// by that time, but hopefully this is enough for most cases. +func AssignRandomAsync(origProg *Prog, rand *rand.Rand) *Prog { + var unassigned map[*ResultArg]bool + leftAsync := maxAsyncPerProg + prog := origProg.Clone() + for i := len(prog.Calls) - 1; i >= 0 && leftAsync > 0; i-- { + call := prog.Calls[i] + producesUnassigned := false + consumes := make(map[*ResultArg]bool) + ForeachArg(call, func(arg Arg, ctx *ArgCtx) { + res, ok := arg.(*ResultArg) + if !ok { + return + } + if res.Dir() != DirIn && unassigned[res] { + // If this call is made async, at least one of the resources + // will be empty when it's needed. + producesUnassigned = true + } + if res.Dir() != DirOut { + consumes[res.Res] = true + } + }) + // Make async with a 66% chance (but never the last call). + if !producesUnassigned && i+1 != len(prog.Calls) && rand.Intn(3) != 0 { + call.Props.Async = true + for res := range consumes { + unassigned[res] = true + } + leftAsync-- + } else { + call.Props.Async = false + unassigned = consumes + } + } + + return prog +} diff --git a/prog/collide_test.go b/prog/collide_test.go new file mode 100644 index 000000000..614b677ef --- /dev/null +++ b/prog/collide_test.go @@ -0,0 +1,84 @@ +// Copyright 2021 syzkaller project authors. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package prog + +import ( + "math/rand" + "testing" +) + +func TestAssignRandomAsync(t *testing.T) { + tests := []struct { + os string + arch string + orig string + check func(*Prog) bool + }{ + { + "linux", "amd64", + `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff) +write(r0, &AUTO="01010101", 0x4) +read(r0, &AUTO=""/4, 0x4) +close(r0) +`, + func(p *Prog) bool { + return !p.Calls[0].Props.Async + }, + }, + { + "linux", "amd64", + `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff) +nanosleep(&AUTO={0x0,0x4C4B40}, &AUTO={0,0}) +write(r0, &AUTO="01010101", 0x4) +read(r0, &AUTO=""/4, 0x4) +close(r0) +`, + func(p *Prog) bool { + return !p.Calls[0].Props.Async || !p.Calls[1].Props.Async + }, + }, + { + "linux", "amd64", + `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff) +r1 = dup(r0) +r2 = dup(r1) +r3 = dup(r2) +r4 = dup(r3) +`, + func(p *Prog) bool { + for _, call := range p.Calls[0 : len(p.Calls)-1] { + if call.Props.Async { + return false + } + } + return true + }, + }, + } + _, rs, iters := initTest(t) + r := rand.New(rs) + anyAsync := false + for _, test := range tests { + target, err := GetTarget(test.os, test.arch) + if err != nil { + t.Fatal(err) + } + p, err := target.Deserialize([]byte(test.orig), Strict) + if err != nil { + t.Fatal(err) + } + for i := 0; i < iters; i++ { + collided := AssignRandomAsync(p, r) + if !test.check(collided) { + t.Fatalf("bad async assignment:\n%s\n", collided.Serialize()) + } + for _, call := range collided.Calls { + anyAsync = anyAsync || call.Props.Async + } + } + } + if !anyAsync { + t.Fatalf("not a single async was assigned") + } +} diff --git a/prog/decodeexec.go b/prog/decodeexec.go index de62879f7..db89aa82e 100644 --- a/prog/decodeexec.go +++ b/prog/decodeexec.go @@ -145,6 +145,10 @@ func (dec *execDecoder) readCallProps(props *CallProps) { switch kind := value.Kind(); kind { case reflect.Int: value.SetInt(int64(arg)) + case reflect.Bool: + if arg == 1 { + 
value.SetBool(true) + } default: panic("Unsupported (yet) kind: " + kind.String()) } diff --git a/prog/encoding.go b/prog/encoding.go index 7c8cf6878..98dee8a8e 100644 --- a/prog/encoding.go +++ b/prog/encoding.go @@ -99,6 +99,7 @@ func (ctx *serializer) call(c *Call) { switch kind := value.Kind(); kind { case reflect.Int: ctx.printf(": %d", value.Int()) + case reflect.Bool: default: panic("unable to serialize call prop of type " + kind.String()) } @@ -376,6 +377,8 @@ func (p *parser) parseCallProps() CallProps { } else { value.SetInt(intV) } + case reflect.Bool: + value.SetBool(true) default: panic("unable to handle call props of type " + kind.String()) } diff --git a/prog/encoding_test.go b/prog/encoding_test.go index 11e71867f..e0e6fbefd 100644 --- a/prog/encoding_test.go +++ b/prog/encoding_test.go @@ -421,7 +421,7 @@ func TestSerializeCallProps(t *testing.T) { }, { "serialize0(0x0) (fail_nth: 5)\n", - []CallProps{{5}}, + []CallProps{{5, false}}, }, { "serialize0(0x0) (fail_nth)\n", @@ -431,6 +431,10 @@ func TestSerializeCallProps(t *testing.T) { "serialize0(0x0) (fail_nth: \"5\")\n", nil, }, + { + "serialize0(0x0) (async)\n", + []CallProps{{0, true}}, + }, } for _, test := range tests { diff --git a/prog/encodingexec.go b/prog/encodingexec.go index fea114717..44a49fc58 100644 --- a/prog/encodingexec.go +++ b/prog/encodingexec.go @@ -134,12 +134,18 @@ type argInfo struct { func (w *execContext) writeCallProps(props CallProps) { w.write(execInstrSetProps) props.ForeachProp(func(_, _ string, value reflect.Value) { + var uintVal uint64 switch kind := value.Kind(); kind { case reflect.Int: - w.write(uint64(value.Int())) + uintVal = uint64(value.Int()) + case reflect.Bool: + if value.Bool() { + uintVal = 1 + } default: panic("Unsupported (yet) kind: " + kind.String()) } + w.write(uintVal) }) } diff --git a/prog/encodingexec_test.go b/prog/encodingexec_test.go index 5e0f73ce6..fe6a4dfb4 100644 --- a/prog/encodingexec_test.go +++ b/prog/encodingexec_test.go @@ -465,11 
+465,14 @@ func TestSerializeForExec(t *testing.T) { { `test() (fail_nth: 3) test() (fail_nth: 4) +test() (async) `, []uint64{ - execInstrSetProps, 3, + execInstrSetProps, 3, 0, callID("test"), ExecNoCopyout, 0, - execInstrSetProps, 4, + execInstrSetProps, 4, 0, + callID("test"), ExecNoCopyout, 0, + execInstrSetProps, 0, 1, callID("test"), ExecNoCopyout, 0, execInstrEOF, }, @@ -478,12 +481,17 @@ test() (fail_nth: 4) { Meta: target.SyscallMap["test"], Index: ExecNoCopyout, - Props: CallProps{3}, + Props: CallProps{3, false}, + }, + { + Meta: target.SyscallMap["test"], + Index: ExecNoCopyout, + Props: CallProps{4, false}, }, { Meta: target.SyscallMap["test"], Index: ExecNoCopyout, - Props: CallProps{4}, + Props: CallProps{0, true}, }, }, }, diff --git a/prog/minimization.go b/prog/minimization.go index 60a715b66..89ed6e142 100644 --- a/prog/minimization.go +++ b/prog/minimization.go @@ -5,6 +5,7 @@ package prog import ( "fmt" + "reflect" ) // Minimize minimizes program p into an equivalent program using the equivalence @@ -28,6 +29,9 @@ func Minimize(p0 *Prog, callIndex0 int, crash bool, pred0 func(*Prog, int) bool) // Try to remove all calls except the last one one-by-one. p0, callIndex0 = removeCalls(p0, callIndex0, crash, pred) + // Try to reset all call props to their default values. + p0 = resetCallProps(p0, callIndex0, pred) + // Try to minimize individual calls. for i := 0; i < len(p0.Calls); i++ { ctx := &minimizeArgsCtx{ @@ -78,6 +82,23 @@ func removeCalls(p0 *Prog, callIndex0 int, crash bool, pred func(*Prog, int) boo return p0, callIndex0 } +func resetCallProps(p0 *Prog, callIndex0 int, pred func(*Prog, int) bool) *Prog { + // Try to reset all call props to their default values. + // This should be reasonable for many progs. 
+ p := p0.Clone() + anyDifferent := false + for idx := range p.Calls { + if !reflect.DeepEqual(p.Calls[idx].Props, CallProps{}) { + p.Calls[idx].Props = CallProps{} + anyDifferent = true + } + } + if anyDifferent && pred(p, callIndex0) { + return p + } + return p0 +} + func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, int) bool) *Prog { props := p0.Calls[callIndex].Props @@ -90,6 +111,15 @@ func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, int } } + // Try to drop async. + if props.Async { + p := p0.Clone() + p.Calls[callIndex].Props.Async = false + if pred(p, callIndex0) { + p0 = p + } + } + return p0 } diff --git a/prog/minimization_test.go b/prog/minimization_test.go index 032b2b080..cf499b7f1 100644 --- a/prog/minimization_test.go +++ b/prog/minimization_test.go @@ -171,6 +171,28 @@ func TestMinimize(t *testing.T) { "pipe2(0x0, 0x0) (fail_nth: 5)\n", -1, }, + // Clear unneeded async flag. + { + "linux", "amd64", + "pipe2(0x0, 0x0) (async)\n", + -1, + func(p *Prog, callIndex int) bool { + return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" + }, + "pipe2(0x0, 0x0)\n", + -1, + }, + // Keep important async flag. 
+ { + "linux", "amd64", + "pipe2(0x0, 0x0) (async)\n", + -1, + func(p *Prog, callIndex int) bool { + return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" && p.Calls[0].Props.Async + }, + "pipe2(0x0, 0x0) (async)\n", + -1, + }, } t.Parallel() for ti, test := range tests { diff --git a/prog/parse_test.go b/prog/parse_test.go index 8de2e36da..eddd03873 100644 --- a/prog/parse_test.go +++ b/prog/parse_test.go @@ -30,7 +30,7 @@ gettid() if ent.Proc != 0 { t.Fatalf("proc %v, want 0", ent.Proc) } - if ent.P.HasFaultInjection() { + if ent.P.RequiredFeatures().FaultInjection { t.Fatalf("fault injection enabled") } want := "getpid-gettid" @@ -67,7 +67,7 @@ func TestParseMulti(t *testing.T) { t.Fatalf("bad procs") } for i, ent := range entries { - if ent.P.HasFaultInjection() { + if ent.P.RequiredFeatures().FaultInjection { t.Fatalf("prog %v has fault injection enabled", i) } } diff --git a/prog/prog.go b/prog/prog.go index d41117a2f..09da7fdf2 100644 --- a/prog/prog.go +++ b/prog/prog.go @@ -19,7 +19,8 @@ type Prog struct { // IMPORTANT: keep the exact values of "key" tag for existing props unchanged, // otherwise the backwards compatibility would be broken. type CallProps struct { - FailNth int `key:"fail_nth"` + FailNth int `key:"fail_nth"` + Async bool `key:"async"` } type Call struct { |
