aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-12-18 11:43:32 +0100
committerDmitry Vyukov <dvyukov@google.com>2017-12-18 14:10:23 +0100
commit465b0b7833049fec5d90f3a24a47e3255065f71b (patch)
tree36966d90f431432f85ca0c518140857cdae2bf7e
parent9f48e03d80af4edfc99cf01811234cf5bf562aa2 (diff)
syz-fuzzer: improve deflaking during minimization
Currently we run an input 3 times to get minimal new coverage, and then during minimization trying only 1 time to get the same coverage. This plays poorly with flaky kernel coverage. Require at least 1 out of 3 runs during minimization to get the same new coverage. Experimental results suggest that this leads to higher quality corpus (though, systematic tuning proved to be very hard due to flakes and hard to explain effects on corpus size, program size, coverage and signal).
-rw-r--r--syz-fuzzer/proc.go81
1 file changed, 38 insertions(+), 43 deletions(-)
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
index 70e86c130..ea306a44a 100644
--- a/syz-fuzzer/proc.go
+++ b/syz-fuzzer/proc.go
@@ -110,54 +110,49 @@ func (proc *Proc) triageInput(item *WorkTriage) {
opts := *execOpts
opts.Flags |= ipc.FlagCollectCover
opts.Flags &= ^ipc.FlagCollide
- if item.minimized {
- // We just need to get input coverage.
- for i := 0; i < 3; i++ {
- info := proc.executeRaw(&opts, item.p, StatTriage)
- if len(info) == 0 || len(info[item.call].Cover) == 0 {
- continue // The call was not executed. Happens sometimes.
+ const (
+ signalRuns = 3
+ minimizeAttempts = 3
+ )
+ // Compute input coverage and non-flaky signal for minimization.
+ notexecuted := 0
+ for i := 0; i < signalRuns; i++ {
+ info := proc.executeRaw(&opts, item.p, StatTriage)
+ if len(info) == 0 || len(info[item.call].Signal) == 0 {
+ // The call was not executed. Happens sometimes.
+ notexecuted++
+ if notexecuted > signalRuns/2 {
+ return // if happens too often, give up
}
- inputCover = append([]uint32{}, info[item.call].Cover...)
- break
+ continue
}
- } else {
- // We need to compute input coverage and non-flaky signal for minimization.
- notexecuted := false
- for i := 0; i < 3; i++ {
- info := proc.executeRaw(&opts, item.p, StatTriage)
- if len(info) == 0 || len(info[item.call].Signal) == 0 {
- // The call was not executed. Happens sometimes.
- if notexecuted {
- return // if it happened twice, give up
- }
- notexecuted = true
- continue
- }
- inf := info[item.call]
- newSignal = cover.Intersection(newSignal, cover.Canonicalize(inf.Signal))
- if len(newSignal) == 0 {
- return
- }
- if len(inputCover) == 0 {
- inputCover = append([]uint32{}, inf.Cover...)
- } else {
- inputCover = cover.Union(inputCover, inf.Cover)
- }
+ inf := info[item.call]
+ newSignal = cover.Intersection(newSignal, cover.Canonicalize(inf.Signal))
+ // Without !minimized check manager starts losing some considerable amount
+ // of coverage after each restart. Mechanics of this are not completely clear.
+ if len(newSignal) == 0 && !item.minimized {
+ return
}
-
+ if len(inputCover) == 0 {
+ inputCover = append([]uint32{}, inf.Cover...)
+ } else {
+ inputCover = cover.Union(inputCover, inf.Cover)
+ }
+ }
+ if !item.minimized {
item.p, item.call = prog.Minimize(item.p, item.call, func(p1 *prog.Prog, call1 int) bool {
- info := proc.execute(execOpts, p1, false, false, false, true, StatMinimize)
- if len(info) == 0 || len(info[call1].Signal) == 0 {
- return false // The call was not executed.
- }
- inf := info[call1]
- signal := cover.Canonicalize(inf.Signal)
- signalMu.RLock()
- defer signalMu.RUnlock()
- if len(cover.Intersection(newSignal, signal)) != len(newSignal) {
- return false
+ for i := 0; i < minimizeAttempts; i++ {
+ info := proc.execute(execOpts, p1, false, false, false, true, StatMinimize)
+ if len(info) == 0 || len(info[call1].Signal) == 0 {
+ continue // The call was not executed.
+ }
+ inf := info[call1]
+ signal := cover.Canonicalize(inf.Signal)
+ if len(cover.Intersection(newSignal, signal)) == len(newSignal) {
+ return true
+ }
}
- return true
+ return false
}, false)
}