aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-12-17 18:34:17 +0100
committerDmitry Vyukov <dvyukov@google.com>2017-12-18 09:50:17 +0100
commit0d231ceb731d89cab71f6ad2ad1aa9d766b2e243 (patch)
treef2eddc72a82d78c10aac507f29d4713bf0106fc0
parentd5beb42acec986ef99739a4187ce8dd9513972cc (diff)
syz-fuzzer: refactor
syz-fuzzer organically grew from a small nice main function into a huge single-file monster with tons of global state. Start refactoring it into something more manageable. This change separates 2 things: 1. Proc: a single fuzzing process (ipc.Env wrapper). 2. WorkQueue: holds global non-fuzzing work items. More work is needed, but this is a good first step.
-rw-r--r--pkg/ipc/ipc.go8
-rw-r--r--syz-fuzzer/fuzzer.go500
-rw-r--r--syz-fuzzer/proc.go362
-rw-r--r--syz-fuzzer/workqueue.go122
-rw-r--r--tools/syz-execprog/execprog.go3
5 files changed, 551 insertions, 444 deletions
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
index ff17b52a3..7aa57dedf 100644
--- a/pkg/ipc/ipc.go
+++ b/pkg/ipc/ipc.go
@@ -166,14 +166,6 @@ type CallInfo struct {
FaultInjected bool
}
-func GetCompMaps(info []CallInfo) []prog.CompMap {
- compMaps := make([]prog.CompMap, len(info))
- for i, inf := range info {
- compMaps[i] = inf.Comps
- }
- return compMaps
-}
-
type Env struct {
in []byte
out []byte
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
index 937a40d01..e907f715a 100644
--- a/syz-fuzzer/fuzzer.go
+++ b/syz-fuzzer/fuzzer.go
@@ -4,10 +4,8 @@
package main
import (
- "bytes"
"flag"
"fmt"
- "math/rand"
"net/http"
_ "net/http/pprof"
"os"
@@ -42,22 +40,6 @@ var (
flagTest = flag.Bool("test", false, "enable image testing mode") // used by syz-ci
)
-const (
- programLength = 30
-)
-
-type Input struct {
- p *prog.Prog
- call int
- signal []uint32
- minimized bool
-}
-
-type Candidate struct {
- p *prog.Prog
- minimized bool
-}
-
var (
manager *RpcClient
target *prog.Target
@@ -71,30 +53,36 @@ var (
corpus []*prog.Prog
corpusHashes map[hash.Sig]struct{}
- triageMu sync.RWMutex
- triage []Input
- triageCandidate []Input
- candidates []Candidate
- smashQueue []Input
-
- gate *ipc.Gate
-
- statExecGen uint64
- statExecFuzz uint64
- statExecCandidate uint64
- statExecTriage uint64
- statExecMinimize uint64
- statExecSmash uint64
- statNewInput uint64
- statExecHints uint64
- statExecHintSeeds uint64
-
allTriaged uint32
noCover bool
faultInjectionEnabled bool
compsSupported bool
)
+type Fuzzer struct {
+ config *ipc.Config
+ execOpts *ipc.ExecOpts
+ procs []*Proc
+ gate *ipc.Gate
+ workQueue *WorkQueue
+ choiceTable *prog.ChoiceTable
+ stats [StatCount]uint64
+}
+
+type Stat int
+
+const (
+ StatGenerate Stat = iota
+ StatFuzz
+ StatCandidate
+ StatTriage
+ StatMinimize
+ StatSmash
+ StatHint
+ StatSeed
+ StatCount
+)
+
func main() {
debug.SetGCPercent(50)
flag.Parse()
@@ -154,22 +142,6 @@ func main() {
for _, s := range r.MaxSignal {
maxSignal[s] = struct{}{}
}
- for _, candidate := range r.Candidates {
- p, err := target.Deserialize(candidate.Prog)
- if err != nil {
- panic(err)
- }
- // TODO: noCover is not yet initialized at this point.
- if noCover {
- corpusMu.Lock()
- corpus = append(corpus, p)
- corpusMu.Unlock()
- } else {
- triageMu.Lock()
- candidates = append(candidates, Candidate{p, candidate.Minimized})
- triageMu.Unlock()
- }
- }
// This requires "fault-inject: support systematic fault injection" kernel commit.
if fd, err := syscall.Open("/proc/self/fail-nth", syscall.O_RDWR, 0); err == nil {
@@ -247,93 +219,37 @@ func main() {
if !*flagLeak {
leakCallback = nil
}
- gate = ipc.NewGate(2**flagProcs, leakCallback)
needPoll := make(chan struct{}, 1)
needPoll <- struct{}{}
- envs := make([]*ipc.Env, *flagProcs)
- for pid := 0; pid < *flagProcs; pid++ {
- env, err := ipc.MakeEnv(config, pid)
+ fuzzer := &Fuzzer{
+ config: config,
+ execOpts: execOpts,
+ gate: ipc.NewGate(2**flagProcs, leakCallback),
+ workQueue: newWorkQueue(*flagProcs, needPoll),
+ choiceTable: ct,
+ }
+
+ for _, candidate := range r.Candidates {
+ p, err := target.Deserialize(candidate.Prog)
if err != nil {
panic(err)
}
- envs[pid] = env
-
- pid := pid
- go func() {
- rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
- rnd := rand.New(rs)
-
- for i := 0; ; i++ {
- triageMu.RLock()
- Logf(1, "#%v: triageCandidate=%v candidates=%v triage=%v smashQueue=%v",
- pid, len(triageCandidate), len(candidates), len(triage),
- len(smashQueue))
- if len(triageCandidate) != 0 || len(candidates) != 0 || len(triage) != 0 || len(smashQueue) != 0 {
- triageMu.RUnlock()
- triageMu.Lock()
- if len(triageCandidate) != 0 {
- last := len(triageCandidate) - 1
- inp := triageCandidate[last]
- triageCandidate = triageCandidate[:last]
- triageMu.Unlock()
- Logf(1, "#%v: triaging candidate", pid)
- triageInput(pid, env, execOpts, inp)
- continue
- } else if len(candidates) != 0 {
- last := len(candidates) - 1
- candidate := candidates[last]
- candidates = candidates[:last]
- wakePoll := len(candidates) < *flagProcs
- triageMu.Unlock()
- if wakePoll {
- select {
- case needPoll <- struct{}{}:
- default:
- }
- }
- Logf(1, "#%v: executing candidate", pid)
- execute(pid, env, execOpts, candidate.p, false, candidate.minimized, true, false, &statExecCandidate)
- continue
- } else if len(triage) != 0 {
- last := len(triage) - 1
- inp := triage[last]
- triage = triage[:last]
- triageMu.Unlock()
- Logf(1, "#%v: triaging", pid)
- triageInput(pid, env, execOpts, inp)
- continue
- } else if len(smashQueue) != 0 {
- last := len(smashQueue) - 1
- inp := smashQueue[last]
- smashQueue = smashQueue[:last]
- triageMu.Unlock()
- Logf(1, "#%v: smashing", pid)
- smashInput(pid, env, execOpts, ct, rs, inp)
- continue
- } else {
- triageMu.Unlock()
- }
- } else {
- triageMu.RUnlock()
- }
+ if noCover {
+ corpusMu.Lock()
+ corpus = append(corpus, p)
+ corpusMu.Unlock()
+ } else {
+ fuzzer.workQueue.enqueue(&WorkCandidate{p, candidate.Minimized})
+ }
+ }
- corpusMu.RLock()
- if len(corpus) == 0 || i%100 == 0 {
- // Generate a new prog.
- corpusMu.RUnlock()
- p := target.Generate(rnd, programLength, ct)
- Logf(1, "#%v: generated", pid)
- execute(pid, env, execOpts, p, false, false, false, false, &statExecGen)
- } else {
- // Mutate an existing prog.
- p := corpus[rnd.Intn(len(corpus))].Clone()
- corpusMu.RUnlock()
- p.Mutate(rs, programLength, ct, corpus)
- Logf(1, "#%v: mutated", pid)
- execute(pid, env, execOpts, p, false, false, false, false, &statExecFuzz)
- }
- }
- }()
+ for pid := 0; pid < *flagProcs; pid++ {
+ proc, err := newProc(fuzzer, pid)
+ if err != nil {
+ Fatalf("failed to create proc: %v", err)
+ }
+ fuzzer.procs = append(fuzzer.procs, proc)
+ go proc.loop()
}
var execTotal uint64
@@ -353,10 +269,8 @@ func main() {
lastPrint = time.Now()
}
if poll || time.Since(lastPoll) > 10*time.Second {
- triageMu.RLock()
- needCandidates := len(candidates) < *flagProcs
- triageMu.RUnlock()
- if !needCandidates && poll {
+ needCandidates := fuzzer.workQueue.wantCandidates()
+ if poll && !needCandidates {
continue
}
@@ -372,25 +286,24 @@ func main() {
}
newSignal = make(map[uint32]struct{})
signalMu.Unlock()
- for _, env := range envs {
- a.Stats["exec total"] += atomic.SwapUint64(&env.StatExecs, 0)
- a.Stats["executor restarts"] += atomic.SwapUint64(&env.StatRestarts, 0)
+ for _, proc := range fuzzer.procs {
+ a.Stats["exec total"] += atomic.SwapUint64(&proc.env.StatExecs, 0)
+ a.Stats["executor restarts"] += atomic.SwapUint64(&proc.env.StatRestarts, 0)
}
- stat := func(p *uint64, name string) {
- v := atomic.SwapUint64(p, 0)
+ stat := func(stat Stat, name string) {
+ v := atomic.SwapUint64(&fuzzer.stats[stat], 0)
a.Stats[name] = v
execTotal += v
}
- stat(&statExecGen, "exec gen")
- stat(&statExecFuzz, "exec fuzz")
- stat(&statExecCandidate, "exec candidate")
- stat(&statExecTriage, "exec triage")
- stat(&statExecMinimize, "exec minimize")
- stat(&statExecSmash, "exec smash")
- stat(&statExecHints, "exec hints")
- stat(&statExecHintSeeds, "exec seeds")
-
- a.Stats["fuzzer new inputs"] = atomic.SwapUint64(&statNewInput, 0)
+ stat(StatGenerate, "exec gen")
+ stat(StatFuzz, "exec fuzz")
+ stat(StatCandidate, "exec candidate")
+ stat(StatTriage, "exec triage")
+ stat(StatMinimize, "exec minimize")
+ stat(StatSmash, "exec smash")
+ stat(StatHint, "exec hints")
+ stat(StatSeed, "exec seeds")
+
r := &PollRes{}
if err := manager.Call("Manager.Poll", a, r); err != nil {
panic(err)
@@ -417,9 +330,7 @@ func main() {
corpus = append(corpus, p)
corpusMu.Unlock()
} else {
- triageMu.Lock()
- candidates = append(candidates, Candidate{p, candidate.Minimized})
- triageMu.Unlock()
+ fuzzer.workQueue.enqueue(&WorkCandidate{p, candidate.Minimized})
}
}
if len(r.Candidates) == 0 && atomic.LoadUint32(&allTriaged) == 0 {
@@ -495,282 +406,3 @@ func addInput(inp RpcInput) {
cover.SignalAdd(maxSignal, diff)
}
}
-
-func smashInput(pid int, env *ipc.Env, execOpts *ipc.ExecOpts, ct *prog.ChoiceTable, rs rand.Source, inp Input) {
- if faultInjectionEnabled {
- failCall(pid, env, execOpts, inp.p, inp.call)
- }
- for i := 0; i < 100; i++ {
- p := inp.p.Clone()
- p.Mutate(rs, programLength, ct, corpus)
- Logf(1, "#%v: smash mutated", pid)
- execute(pid, env, execOpts, p, false, false, false, false, &statExecSmash)
- }
- if compsSupported {
- executeHintSeed(pid, env, execOpts, inp.p, inp.call)
- }
-}
-
-func failCall(pid int, env *ipc.Env, execOpts *ipc.ExecOpts, p *prog.Prog, call int) {
- for nth := 0; nth < 100; nth++ {
- Logf(1, "#%v: injecting fault into call %v/%v", pid, call, nth)
- opts := *execOpts
- opts.Flags |= ipc.FlagInjectFault
- opts.FaultCall = call
- opts.FaultNth = nth
- info := execute1(pid, env, &opts, p, &statExecSmash)
- if info != nil && len(info) > call && !info[call].FaultInjected {
- break
- }
- }
-}
-
-func triageInput(pid int, env *ipc.Env, execOpts *ipc.ExecOpts, inp Input) {
- if noCover {
- panic("should not be called when coverage is disabled")
- }
-
- signalMu.RLock()
- newSignal := cover.SignalDiff(corpusSignal, inp.signal)
- signalMu.RUnlock()
- if len(newSignal) == 0 {
- return
- }
- newSignal = cover.Canonicalize(newSignal)
-
- call := inp.p.Calls[inp.call].Meta
- data := inp.p.Serialize()
- sig := hash.Hash(data)
-
- Logf(3, "triaging input for %v (new signal=%v):\n%s", call.CallName, len(newSignal), data)
- var inputCover cover.Cover
- opts := *execOpts
- opts.Flags |= ipc.FlagCollectCover
- opts.Flags &= ^ipc.FlagCollide
- if inp.minimized {
- // We just need to get input coverage.
- for i := 0; i < 3; i++ {
- info := execute1(pid, env, &opts, inp.p, &statExecTriage)
- if len(info) == 0 || len(info[inp.call].Cover) == 0 {
- continue // The call was not executed. Happens sometimes.
- }
- inputCover = append([]uint32{}, info[inp.call].Cover...)
- break
- }
- } else {
- // We need to compute input coverage and non-flaky signal for minimization.
- notexecuted := false
- for i := 0; i < 3; i++ {
- info := execute1(pid, env, &opts, inp.p, &statExecTriage)
- if len(info) == 0 || len(info[inp.call].Signal) == 0 {
- // The call was not executed. Happens sometimes.
- if notexecuted {
- return // if it happened twice, give up
- }
- notexecuted = true
- continue
- }
- inf := info[inp.call]
- newSignal = cover.Intersection(newSignal, cover.Canonicalize(inf.Signal))
- if len(newSignal) == 0 {
- return
- }
- if len(inputCover) == 0 {
- inputCover = append([]uint32{}, inf.Cover...)
- } else {
- inputCover = cover.Union(inputCover, inf.Cover)
- }
- }
-
- inp.p, inp.call = prog.Minimize(inp.p, inp.call, func(p1 *prog.Prog, call1 int) bool {
- info := execute(pid, env, execOpts, p1, false, false, false, true, &statExecMinimize)
- if len(info) == 0 || len(info[call1].Signal) == 0 {
- return false // The call was not executed.
- }
- inf := info[call1]
- signal := cover.Canonicalize(inf.Signal)
- signalMu.RLock()
- defer signalMu.RUnlock()
- if len(cover.Intersection(newSignal, signal)) != len(newSignal) {
- return false
- }
- return true
- }, false)
- }
-
- atomic.AddUint64(&statNewInput, 1)
- Logf(2, "added new input for %v to corpus:\n%s", call.CallName, data)
- a := &NewInputArgs{
- Name: *flagName,
- RpcInput: RpcInput{
- Call: call.CallName,
- Prog: data,
- Signal: []uint32(cover.Canonicalize(inp.signal)),
- Cover: []uint32(inputCover),
- },
- }
- if err := manager.Call("Manager.NewInput", a, nil); err != nil {
- panic(err)
- }
-
- signalMu.Lock()
- cover.SignalAdd(corpusSignal, inp.signal)
- signalMu.Unlock()
-
- corpusMu.Lock()
- if _, ok := corpusHashes[sig]; !ok {
- corpus = append(corpus, inp.p)
- corpusHashes[sig] = struct{}{}
- }
- corpusMu.Unlock()
-
- if !inp.minimized {
- triageMu.Lock()
- smashQueue = append(smashQueue, inp)
- triageMu.Unlock()
- }
-}
-
-func executeHintSeed(pid int, env *ipc.Env, execOpts *ipc.ExecOpts, p *prog.Prog, call int) {
- Logf(1, "#%v: collecting comparisons", pid)
- // First execute the original program to dump comparisons from KCOV.
- info := execute(pid, env, execOpts, p, true, false, false, true, &statExecHintSeeds)
- if info == nil {
- return
- }
-
- // Then extract the comparisons data.
- compMaps := ipc.GetCompMaps(info)
-
- // Then mutate the initial program for every match between
- // a syscall argument and a comparison operand.
- // Execute each of such mutants to check if it gives new coverage.
- p.MutateWithHints(call, compMaps[call], func(p *prog.Prog) {
- Logf(1, "#%v: executing comparison hint", pid)
- execute(pid, env, execOpts, p, false, false, false, false, &statExecHints)
- })
-}
-
-func execute(pid int, env *ipc.Env, execOpts *ipc.ExecOpts, p *prog.Prog,
- needComps, minimized, candidate, noCollide bool, stat *uint64) []ipc.CallInfo {
- opts := *execOpts
- if needComps {
- if !compsSupported {
- panic("compsSupported==false and execute() called with needComps")
- }
- opts.Flags |= ipc.FlagCollectComps
- }
- if noCollide {
- opts.Flags &= ^ipc.FlagCollide
- }
- info := execute1(pid, env, &opts, p, stat)
- signalMu.RLock()
- defer signalMu.RUnlock()
-
- for i, inf := range info {
- if !cover.SignalNew(maxSignal, inf.Signal) {
- continue
- }
- diff := cover.SignalDiff(maxSignal, inf.Signal)
-
- signalMu.RUnlock()
- signalMu.Lock()
- cover.SignalAdd(maxSignal, diff)
- cover.SignalAdd(newSignal, diff)
- signalMu.Unlock()
- signalMu.RLock()
-
- inp := Input{
- p: p.Clone(),
- call: i,
- signal: append([]uint32{}, inf.Signal...),
- minimized: minimized,
- }
- triageMu.Lock()
- if candidate {
- triageCandidate = append(triageCandidate, inp)
- } else {
- triage = append(triage, inp)
- }
- triageMu.Unlock()
- }
- return info
-}
-
-var logMu sync.Mutex
-
-func execute1(pid int, env *ipc.Env, opts *ipc.ExecOpts, p *prog.Prog, stat *uint64) []ipc.CallInfo {
- if false {
- // For debugging, this function must not be executed with locks held.
- corpusMu.Lock()
- corpusMu.Unlock()
- signalMu.Lock()
- signalMu.Unlock()
- triageMu.Lock()
- triageMu.Unlock()
- }
- if opts.Flags&ipc.FlagDedupCover == 0 {
- panic("dedup cover is not enabled")
- }
-
- // Limit concurrency window and do leak checking once in a while.
- idx := gate.Enter()
- defer gate.Leave(idx)
-
- strOpts := ""
- if opts.Flags&ipc.FlagInjectFault != 0 {
- strOpts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", opts.FaultCall, opts.FaultNth)
- }
-
- // The following output helps to understand what program crashed kernel.
- // It must not be intermixed.
- switch *flagOutput {
- case "none":
- // This case intentionally left blank.
- case "stdout":
- data := p.Serialize()
- logMu.Lock()
- Logf(0, "executing program %v%v:\n%s", pid, strOpts, data)
- logMu.Unlock()
- case "dmesg":
- fd, err := syscall.Open("/dev/kmsg", syscall.O_WRONLY, 0)
- if err == nil {
- buf := new(bytes.Buffer)
- fmt.Fprintf(buf, "syzkaller: executing program %v%v:\n%s", pid, strOpts, p.Serialize())
- syscall.Write(fd, buf.Bytes())
- syscall.Close(fd)
- }
- case "file":
- f, err := os.Create(fmt.Sprintf("%v-%v.prog", *flagName, pid))
- if err == nil {
- if strOpts != "" {
- fmt.Fprintf(f, "#%v\n", strOpts)
- }
- f.Write(p.Serialize())
- f.Close()
- }
- }
-
- try := 0
-retry:
- atomic.AddUint64(stat, 1)
- output, info, failed, hanged, err := env.Exec(opts, p)
- if failed {
- // BUG in output should be recognized by manager.
- Logf(0, "BUG: executor-detected bug:\n%s", output)
- // Don't return any cover so that the input is not added to corpus.
- return nil
- }
- if err != nil {
- if _, ok := err.(ipc.ExecutorFailure); ok || try > 10 {
- panic(err)
- }
- try++
- Logf(4, "fuzzer detected executor failure='%v', retrying #%d\n", err, (try + 1))
- debug.FreeOSMemory()
- time.Sleep(time.Second)
- goto retry
- }
- Logf(2, "result failed=%v hanged=%v: %v\n", failed, hanged, string(output))
- return info
-}
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
new file mode 100644
index 000000000..9d4b86f8e
--- /dev/null
+++ b/syz-fuzzer/proc.go
@@ -0,0 +1,362 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "os"
+ "runtime/debug"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/hash"
+ "github.com/google/syzkaller/pkg/ipc"
+ . "github.com/google/syzkaller/pkg/log"
+ . "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/prog"
+)
+
+const (
+ programLength = 30
+)
+
+// Proc represents a single fuzzing process (executor).
+type Proc struct {
+ fuzzer *Fuzzer
+ pid int
+ env *ipc.Env
+ rnd *rand.Rand
+}
+
+func newProc(fuzzer *Fuzzer, pid int) (*Proc, error) {
+ env, err := ipc.MakeEnv(fuzzer.config, pid)
+ if err != nil {
+ return nil, err
+ }
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12))
+ proc := &Proc{
+ fuzzer: fuzzer,
+ pid: pid,
+ env: env,
+ rnd: rnd,
+ }
+ return proc, nil
+}
+
+func (proc *Proc) loop() {
+ pid := proc.pid
+ execOpts := proc.fuzzer.execOpts
+ ct := proc.fuzzer.choiceTable
+
+ for i := 0; ; i++ {
+ item := proc.fuzzer.workQueue.dequeue()
+ if item != nil {
+ switch item := item.(type) {
+ case *WorkTriage:
+ proc.triageInput(item)
+ case *WorkCandidate:
+ proc.execute(execOpts, item.p, false, item.minimized,
+ true, false, StatCandidate)
+ case *WorkSmash:
+ proc.smashInput(item)
+ default:
+ panic("unknown work type")
+ }
+ continue
+ }
+
+ corpusMu.RLock()
+ if len(corpus) == 0 || i%100 == 0 {
+ // Generate a new prog.
+ corpusMu.RUnlock()
+ p := target.Generate(proc.rnd, programLength, ct)
+ Logf(1, "#%v: generated", pid)
+ proc.execute(execOpts, p, false, false, false, false, StatGenerate)
+ } else {
+ // Mutate an existing prog.
+ p := corpus[proc.rnd.Intn(len(corpus))].Clone()
+ corpusMu.RUnlock()
+ // TODO: it seems that access to corpus is not protected here.
+ p.Mutate(proc.rnd, programLength, ct, corpus)
+ Logf(1, "#%v: mutated", pid)
+ proc.execute(execOpts, p, false, false, false, false, StatFuzz)
+ }
+ }
+}
+
+func (proc *Proc) triageInput(item *WorkTriage) {
+ Logf(1, "#%v: triaging minimized=%v candidate=%v", proc.pid, item.minimized, item.candidate)
+
+ execOpts := proc.fuzzer.execOpts
+ if noCover {
+ panic("should not be called when coverage is disabled")
+ }
+
+ signalMu.RLock()
+ newSignal := cover.SignalDiff(corpusSignal, item.signal)
+ signalMu.RUnlock()
+ if len(newSignal) == 0 {
+ return
+ }
+ newSignal = cover.Canonicalize(newSignal)
+
+ call := item.p.Calls[item.call].Meta
+ data := item.p.Serialize()
+ sig := hash.Hash(data)
+
+ Logf(3, "triaging input for %v (new signal=%v):\n%s", call.CallName, len(newSignal), data)
+ var inputCover cover.Cover
+ opts := *execOpts
+ opts.Flags |= ipc.FlagCollectCover
+ opts.Flags &= ^ipc.FlagCollide
+ if item.minimized {
+ // We just need to get input coverage.
+ for i := 0; i < 3; i++ {
+ info := proc.executeRaw(&opts, item.p, StatTriage)
+ if len(info) == 0 || len(info[item.call].Cover) == 0 {
+ continue // The call was not executed. Happens sometimes.
+ }
+ inputCover = append([]uint32{}, info[item.call].Cover...)
+ break
+ }
+ } else {
+ // We need to compute input coverage and non-flaky signal for minimization.
+ notexecuted := false
+ for i := 0; i < 3; i++ {
+ info := proc.executeRaw(&opts, item.p, StatTriage)
+ if len(info) == 0 || len(info[item.call].Signal) == 0 {
+ // The call was not executed. Happens sometimes.
+ if notexecuted {
+ return // if it happened twice, give up
+ }
+ notexecuted = true
+ continue
+ }
+ inf := info[item.call]
+ newSignal = cover.Intersection(newSignal, cover.Canonicalize(inf.Signal))
+ if len(newSignal) == 0 {
+ return
+ }
+ if len(inputCover) == 0 {
+ inputCover = append([]uint32{}, inf.Cover...)
+ } else {
+ inputCover = cover.Union(inputCover, inf.Cover)
+ }
+ }
+
+ item.p, item.call = prog.Minimize(item.p, item.call, func(p1 *prog.Prog, call1 int) bool {
+ info := proc.execute(execOpts, p1, false, false, false, true, StatMinimize)
+ if len(info) == 0 || len(info[call1].Signal) == 0 {
+ return false // The call was not executed.
+ }
+ inf := info[call1]
+ signal := cover.Canonicalize(inf.Signal)
+ signalMu.RLock()
+ defer signalMu.RUnlock()
+ if len(cover.Intersection(newSignal, signal)) != len(newSignal) {
+ return false
+ }
+ return true
+ }, false)
+ }
+
+ Logf(2, "added new input for %v to corpus:\n%s", call.CallName, data)
+ a := &NewInputArgs{
+ Name: *flagName,
+ RpcInput: RpcInput{
+ Call: call.CallName,
+ Prog: data,
+ Signal: []uint32(cover.Canonicalize(item.signal)),
+ Cover: []uint32(inputCover),
+ },
+ }
+ if err := manager.Call("Manager.NewInput", a, nil); err != nil {
+ panic(err)
+ }
+
+ signalMu.Lock()
+ cover.SignalAdd(corpusSignal, item.signal)
+ signalMu.Unlock()
+
+ corpusMu.Lock()
+ if _, ok := corpusHashes[sig]; !ok {
+ corpus = append(corpus, item.p)
+ corpusHashes[sig] = struct{}{}
+ }
+ corpusMu.Unlock()
+
+ if !item.minimized {
+ proc.fuzzer.workQueue.enqueue(&WorkSmash{item.p, item.call})
+ }
+}
+
+func (proc *Proc) smashInput(item *WorkSmash) {
+ if faultInjectionEnabled {
+ proc.failCall(item.p, item.call)
+ }
+ for i := 0; i < 100; i++ {
+ p := item.p.Clone()
+ // TODO: it seems that access to corpus is not protected here.
+ p.Mutate(proc.rnd, programLength, proc.fuzzer.choiceTable, corpus)
+ Logf(1, "#%v: smash mutated", proc.pid)
+ proc.execute(proc.fuzzer.execOpts, p, false, false, false, false, StatSmash)
+ }
+ if compsSupported {
+ proc.executeHintSeed(item.p, item.call)
+ }
+}
+
+func (proc *Proc) failCall(p *prog.Prog, call int) {
+ for nth := 0; nth < 100; nth++ {
+ Logf(1, "#%v: injecting fault into call %v/%v", proc.pid, call, nth)
+ opts := *proc.fuzzer.execOpts
+ opts.Flags |= ipc.FlagInjectFault
+ opts.FaultCall = call
+ opts.FaultNth = nth
+ info := proc.executeRaw(&opts, p, StatSmash)
+ if info != nil && len(info) > call && !info[call].FaultInjected {
+ break
+ }
+ }
+}
+
+func (proc *Proc) executeHintSeed(p *prog.Prog, call int) {
+ Logf(1, "#%v: collecting comparisons", proc.pid)
+ // First execute the original program to dump comparisons from KCOV.
+ info := proc.execute(proc.fuzzer.execOpts, p, true, false, false, true, StatSeed)
+ if info == nil {
+ return
+ }
+
+ // Then mutate the initial program for every match between
+ // a syscall argument and a comparison operand.
+ // Execute each of such mutants to check if it gives new coverage.
+ p.MutateWithHints(call, info[call].Comps, func(p *prog.Prog) {
+ Logf(1, "#%v: executing comparison hint", proc.pid)
+ proc.execute(proc.fuzzer.execOpts, p, false, false, false, false, StatHint)
+ })
+}
+
+func (proc *Proc) execute(execOpts *ipc.ExecOpts, p *prog.Prog,
+ needComps, minimized, candidate, noCollide bool, stat Stat) []ipc.CallInfo {
+
+ opts := *execOpts
+ if needComps {
+ if !compsSupported {
+ panic("compsSupported==false and execute() called with needComps")
+ }
+ opts.Flags |= ipc.FlagCollectComps
+ }
+ if noCollide {
+ opts.Flags &= ^ipc.FlagCollide
+ }
+ info := proc.executeRaw(&opts, p, stat)
+ signalMu.RLock()
+ defer signalMu.RUnlock()
+
+ for i, inf := range info {
+ if !cover.SignalNew(maxSignal, inf.Signal) {
+ continue
+ }
+ diff := cover.SignalDiff(maxSignal, inf.Signal)
+
+ signalMu.RUnlock()
+ signalMu.Lock()
+ cover.SignalAdd(maxSignal, diff)
+ cover.SignalAdd(newSignal, diff)
+ signalMu.Unlock()
+ signalMu.RLock()
+
+ proc.fuzzer.workQueue.enqueue(&WorkTriage{
+ p: p.Clone(),
+ call: i,
+ signal: append([]uint32{}, inf.Signal...),
+ candidate: candidate,
+ minimized: minimized,
+ })
+ }
+ return info
+}
+
+var logMu sync.Mutex
+
+func (proc *Proc) executeRaw(opts *ipc.ExecOpts, p *prog.Prog, stat Stat) []ipc.CallInfo {
+ if false {
+ // For debugging, this function must not be executed with locks held.
+ corpusMu.Lock()
+ corpusMu.Unlock()
+ signalMu.Lock()
+ signalMu.Unlock()
+ }
+ pid := proc.pid
+ if opts.Flags&ipc.FlagDedupCover == 0 {
+ panic("dedup cover is not enabled")
+ }
+
+ // Limit concurrency window and do leak checking once in a while.
+ ticket := proc.fuzzer.gate.Enter()
+ defer proc.fuzzer.gate.Leave(ticket)
+
+ strOpts := ""
+ if opts.Flags&ipc.FlagInjectFault != 0 {
+ strOpts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", opts.FaultCall, opts.FaultNth)
+ }
+
+ // The following output helps to understand what program crashed kernel.
+ // It must not be intermixed.
+ switch *flagOutput {
+ case "none":
+ // This case intentionally left blank.
+ case "stdout":
+ data := p.Serialize()
+ logMu.Lock()
+ Logf(0, "executing program %v%v:\n%s", pid, strOpts, data)
+ logMu.Unlock()
+ case "dmesg":
+ fd, err := syscall.Open("/dev/kmsg", syscall.O_WRONLY, 0)
+ if err == nil {
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "syzkaller: executing program %v%v:\n%s", pid, strOpts, p.Serialize())
+ syscall.Write(fd, buf.Bytes())
+ syscall.Close(fd)
+ }
+ case "file":
+ f, err := os.Create(fmt.Sprintf("%v-%v.prog", *flagName, pid))
+ if err == nil {
+ if strOpts != "" {
+ fmt.Fprintf(f, "#%v\n", strOpts)
+ }
+ f.Write(p.Serialize())
+ f.Close()
+ }
+ }
+
+ try := 0
+retry:
+ atomic.AddUint64(&proc.fuzzer.stats[stat], 1)
+ output, info, failed, hanged, err := proc.env.Exec(opts, p)
+ if failed {
+ // BUG in output should be recognized by manager.
+ Logf(0, "BUG: executor-detected bug:\n%s", output)
+ // Don't return any cover so that the input is not added to corpus.
+ return nil
+ }
+ if err != nil {
+ if _, ok := err.(ipc.ExecutorFailure); ok || try > 10 {
+ panic(err)
+ }
+ try++
+ Logf(4, "fuzzer detected executor failure='%v', retrying #%d\n", err, (try + 1))
+ debug.FreeOSMemory()
+ time.Sleep(time.Second)
+ goto retry
+ }
+ Logf(2, "result failed=%v hanged=%v: %v\n", failed, hanged, string(output))
+ return info
+}
diff --git a/syz-fuzzer/workqueue.go b/syz-fuzzer/workqueue.go
new file mode 100644
index 000000000..b500e7c78
--- /dev/null
+++ b/syz-fuzzer/workqueue.go
@@ -0,0 +1,122 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "sync"
+
+ "github.com/google/syzkaller/prog"
+)
+
+// WorkQueue holds global non-fuzzing work items (see the Work* structs below).
+// WorkQueue also does prioritization among work items, for example, we want
+// to triage and send to manager new inputs before we smash programs
+// in order to not permanently lose interesting programs in case of VM crash.
+type WorkQueue struct {
+ mu sync.RWMutex
+ triageCandidate []*WorkTriage
+ candidate []*WorkCandidate
+ triage []*WorkTriage
+ smash []*WorkSmash
+
+ procs int
+ needCandidates chan struct{}
+}
+
+// WorkTriage are programs for which we noticed potential new coverage during
+// first execution. But we are not sure yet if the coverage is real or not.
+// During triage we understand if these programs in fact give new coverage,
+// and if yes, minimize them and add to corpus.
+type WorkTriage struct {
+ p *prog.Prog
+ call int
+ signal []uint32
+ candidate bool
+ minimized bool
+}
+
+// WorkCandidate are programs from hub.
+// We don't know yet if they are useful for this fuzzer or not.
+// A proc handles them the same way as locally generated/mutated programs.
+type WorkCandidate struct {
+ p *prog.Prog
+ minimized bool
+}
+
+// WorkSmash are programs just added to corpus.
+// During smashing these programs receive a one-time special attention
+// (emit faults, collect comparison hints, etc).
+type WorkSmash struct {
+ p *prog.Prog
+ call int
+}
+
+func newWorkQueue(procs int, needCandidates chan struct{}) *WorkQueue {
+ return &WorkQueue{
+ procs: procs,
+ needCandidates: needCandidates,
+ }
+}
+
+func (wq *WorkQueue) enqueue(item interface{}) {
+ wq.mu.Lock()
+ defer wq.mu.Unlock()
+ switch item := item.(type) {
+ case *WorkTriage:
+ if item.candidate {
+ wq.triageCandidate = append(wq.triageCandidate, item)
+ } else {
+ wq.triage = append(wq.triage, item)
+ }
+ case *WorkCandidate:
+ wq.candidate = append(wq.candidate, item)
+ case *WorkSmash:
+ wq.smash = append(wq.smash, item)
+ default:
+ panic("unknown work type")
+ }
+}
+
+func (wq *WorkQueue) dequeue() (item interface{}) {
+ wq.mu.RLock()
+ if len(wq.triageCandidate)+len(wq.candidate)+len(wq.triage)+len(wq.smash) == 0 {
+ wq.mu.RUnlock()
+ return nil
+ }
+ wq.mu.RUnlock()
+ wq.mu.Lock()
+ wantCandidates := false
+ if len(wq.triageCandidate) != 0 {
+ last := len(wq.triageCandidate) - 1
+ item = wq.triageCandidate[last]
+ wq.triageCandidate = wq.triageCandidate[:last]
+ } else if len(wq.candidate) != 0 {
+ last := len(wq.candidate) - 1
+ item = wq.candidate[last]
+ wq.candidate = wq.candidate[:last]
+ wantCandidates = len(wq.candidate) < wq.procs
+ } else if len(wq.triage) != 0 {
+ last := len(wq.triage) - 1
+ item = wq.triage[last]
+ wq.triage = wq.triage[:last]
+ } else if len(wq.smash) != 0 {
+ last := len(wq.smash) - 1
+ item = wq.smash[last]
+ wq.smash = wq.smash[:last]
+ }
+ wq.mu.Unlock()
+ if wantCandidates {
+ select {
+ case wq.needCandidates <- struct{}{}:
+ default:
+ }
+ }
+ return item
+}
+
+func (wq *WorkQueue) wantCandidates() bool {
+ wq.mu.RLock()
+ defer wq.mu.RUnlock()
+ return len(wq.candidate) < wq.procs
+}
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index ff0ceb4ca..ef5a0479a 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -184,13 +184,12 @@ func main() {
}
}
if *flagHints {
- compMaps := ipc.GetCompMaps(info)
ncomps, ncandidates := 0, 0
for i := range entry.P.Calls {
if *flagOutput == "stdout" {
fmt.Printf("call %v:\n", i)
}
- comps := compMaps[i]
+ comps := info[i].Comps
for v, args := range comps {
ncomps += len(args)
if *flagOutput == "stdout" {