aboutsummaryrefslogtreecommitdiffstats
path: root/pkg
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2024-02-16 22:47:59 +0100
committerAleksandr Nogikh <nogikh@google.com>2024-03-12 11:14:34 +0000
commitc35c26ec6312219507c518bae2e56c1ea46a5f36 (patch)
treece5b570187b5720857d7d1d38c4c399354f394bc /pkg
parent5d97b658d9c2ec0cd68e5632ce7f11bfe5d6c282 (diff)
pkg/fuzzer: factor out the fuzzing engine
This is the first step for #1541. Move the fuzzing engine that used to be interleaved with other syz-fuzzer code into a separate package. For now, the algorithm is more or less the same as it was, the only difference is that a pkg/fuzzer instance scales to the available computing power. Add an executor-based test that performs real fuzzing.
Diffstat (limited to 'pkg')
-rw-r--r--pkg/csource/generated.go19
-rw-r--r--pkg/fuzzer/corpus.go114
-rw-r--r--pkg/fuzzer/corpus_test.go98
-rw-r--r--pkg/fuzzer/fuzzer.go350
-rw-r--r--pkg/fuzzer/fuzzer_test.go290
-rw-r--r--pkg/fuzzer/job.go401
-rw-r--r--pkg/fuzzer/prio_queue.go100
-rw-r--r--pkg/fuzzer/prio_queue_test.go44
-rw-r--r--pkg/fuzzer/stats.go26
9 files changed, 1442 insertions, 0 deletions
diff --git a/pkg/csource/generated.go b/pkg/csource/generated.go
index 7c01243ce..2c33e7a29 100644
--- a/pkg/csource/generated.go
+++ b/pkg/csource/generated.go
@@ -12330,6 +12330,25 @@ static int do_sandbox_none(void)
}
#endif
+#if SYZ_EXECUTOR || __NR_syz_test_fuzzer1
+
/* Fails with a "{{CRASH: name}}" marker in the output (matched by crashRe
   in the pkg/fuzzer tests) and terminates the executor process. */
static void fake_crash(const char* name)
{
	failmsg("crash", "{{CRASH: %s}}", name);
	doexit(1);
}

/* Test-only pseudo-syscall: deterministically fakes a crash for two
   specific argument combinations so that fuzzer tests can verify that
   both bugs are eventually discovered. Returns 0 otherwise. */
static long syz_test_fuzzer1(volatile long a, volatile long b, volatile long c)
{
	if (a == 1 && b == 1 && c == 1)
		fake_crash("first bug");
	if (a == 1 && b == 2 && c == 3)
		fake_crash("second bug");
	return 0;
}
+
+#endif
+
#elif GOOS_windows
#include <direct.h>
diff --git a/pkg/fuzzer/corpus.go b/pkg/fuzzer/corpus.go
new file mode 100644
index 000000000..b92ab1c64
--- /dev/null
+++ b/pkg/fuzzer/corpus.go
@@ -0,0 +1,114 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+
+ "github.com/google/syzkaller/pkg/hash"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/prog"
+)
+
// Corpus holds the programs that produced new coverage signal, together
// with the aggregated signal state. All methods are safe for concurrent use.
type Corpus struct {
	mu       sync.RWMutex
	progs    []*prog.Prog
	hashes   map[hash.Sig]struct{} // dedupes programs by the hash of their serialized form
	sumPrios int64                 // total of all per-program priorities (see Save)
	accPrios []int64               // prefix sums of priorities; parallel to progs, used for weighted choice
	signal    signal.Signal // signal of inputs in corpus
	maxSignal signal.Signal // max signal ever observed (including flakes)
	newSignal signal.Signal // part of maxSignal accumulated since the last GrabNewSignal
}

// CorpusStat is a snapshot of the relevant current state figures.
type CorpusStat struct {
	Progs     int // number of programs in the corpus
	Signal    int // size of the corpus signal
	MaxSignal int // size of the max-ever-observed signal
}

// newCorpus returns an empty, ready-to-use corpus.
func newCorpus() *Corpus {
	return &Corpus{
		hashes: make(map[hash.Sig]struct{}),
	}
}
+
+// TODO: maybe we want to treat progs from other fuzzers exactly like
+// candidates? And even triage them?
+func (corpus *Corpus) Save(p *prog.Prog, signal signal.Signal, sig hash.Sig) {
+ corpus.mu.Lock()
+ defer corpus.mu.Unlock()
+ if _, ok := corpus.hashes[sig]; !ok {
+ corpus.progs = append(corpus.progs, p)
+ corpus.hashes[sig] = struct{}{}
+ prio := int64(len(signal))
+ if prio == 0 {
+ prio = 1
+ }
+ corpus.sumPrios += prio
+ corpus.accPrios = append(corpus.accPrios, corpus.sumPrios)
+ }
+ corpus.signal.Merge(signal)
+ corpus.maxSignal.Merge(signal)
+}
+
+// Signal that should no longer be chased after.
+func (corpus *Corpus) AddMaxSignal(sign signal.Signal) {
+ // TODO: how do we ensure occasional drop of this max cover?
+ corpus.mu.Lock()
+ defer corpus.mu.Unlock()
+ corpus.maxSignal.Merge(sign)
+}
+
+func (corpus *Corpus) AddRawMaxSignal(signal []uint32, prio uint8) signal.Signal {
+ corpus.mu.Lock()
+ defer corpus.mu.Unlock()
+ diff := corpus.maxSignal.DiffRaw(signal, prio)
+ if diff.Empty() {
+ return diff
+ }
+ corpus.maxSignal.Merge(diff)
+ corpus.newSignal.Merge(diff)
+ return diff
+}
+
+func (corpus *Corpus) chooseProgram(r *rand.Rand) *prog.Prog {
+ corpus.mu.RLock()
+ defer corpus.mu.RUnlock()
+ if len(corpus.progs) == 0 {
+ return nil
+ }
+ randVal := r.Int63n(corpus.sumPrios + 1)
+ idx := sort.Search(len(corpus.accPrios), func(i int) bool {
+ return corpus.accPrios[i] >= randVal
+ })
+ return corpus.progs[idx]
+}
+
+func (corpus *Corpus) Programs() []*prog.Prog {
+ corpus.mu.RLock()
+ defer corpus.mu.RUnlock()
+ return corpus.progs
+}
+
+func (corpus *Corpus) GrabNewSignal() signal.Signal {
+ corpus.mu.Lock()
+ defer corpus.mu.Unlock()
+ sign := corpus.newSignal
+ corpus.newSignal = nil
+ return sign
+}
+
+func (corpus *Corpus) Stat() CorpusStat {
+ corpus.mu.RLock()
+ defer corpus.mu.RUnlock()
+ return CorpusStat{
+ Progs: len(corpus.progs),
+ Signal: len(corpus.signal),
+ MaxSignal: len(corpus.maxSignal),
+ }
+}
diff --git a/pkg/fuzzer/corpus_test.go b/pkg/fuzzer/corpus_test.go
new file mode 100644
index 000000000..0b62d8c5a
--- /dev/null
+++ b/pkg/fuzzer/corpus_test.go
@@ -0,0 +1,98 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "math"
+ "math/rand"
+ "testing"
+
+ "github.com/google/syzkaller/pkg/hash"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
+)
+
// InputTest bundles a generated program with its synthetic signal and the
// hash of its serialized form, as expected by Corpus.Save.
type InputTest struct {
	p    *prog.Prog
	sign signal.Signal
	sig  hash.Sig
}
+
// TestChooseProgram verifies that chooseProgram samples corpus programs
// with frequency proportional to their priority (the signal size, or 1
// when a program has no signal). The tolerance check depends on the
// fixed rand seed and the exact order of rand consumption below.
func TestChooseProgram(t *testing.T) {
	rs := rand.NewSource(0)
	r := rand.New(rs)
	target := getTarget(t, targets.TestOS, targets.TestArch64)
	corpus := newCorpus()

	const (
		maxIters   = 1000
		sizeCorpus = 1000
		eps        = 0.01
	)

	priorities := make(map[*prog.Prog]int64)
	for i := 0; i < sizeCorpus; i++ {
		sizeSig := i + 1
		if sizeSig%250 == 0 {
			// Every 250th program gets no signal, exercising the
			// "no signal => priority 1" path in Save.
			sizeSig = 0
		}
		inp := generateInput(target, rs, 10, sizeSig)
		corpus.Save(inp.p, inp.sign, inp.sig)
		priorities[inp.p] = int64(len(inp.sign))
	}
	counters := make(map[*prog.Prog]int)
	for it := 0; it < maxIters; it++ {
		counters[corpus.chooseProgram(r)]++
	}
	for p, prio := range priorities {
		// Compare expected selection probability with observed frequency.
		prob := float64(prio) / float64(corpus.sumPrios)
		diff := math.Abs(prob*maxIters - float64(counters[p]))
		if diff > eps*maxIters {
			t.Fatalf("the difference (%f) is higher than %f%%", diff, eps*100)
		}
	}
}
+
+func TestCorpusSaveConcurrency(t *testing.T) {
+ target := getTarget(t, targets.TestOS, targets.TestArch64)
+ corpus := newCorpus()
+
+ const (
+ routines = 10
+ iters = 100
+ )
+
+ for i := 0; i < routines; i++ {
+ go func() {
+ rs := rand.NewSource(0)
+ r := rand.New(rs)
+ for it := 0; it < iters; it++ {
+ inp := generateInput(target, rs, 10, it)
+ corpus.Save(inp.p, inp.sign, inp.sig)
+ corpus.chooseProgram(r).Clone()
+ }
+ }()
+ }
+}
+
+func generateInput(target *prog.Target, rs rand.Source, ncalls, sizeSig int) (inp InputTest) {
+ inp.p = target.Generate(rs, ncalls, target.DefaultChoiceTable())
+ var raw []uint32
+ for i := 1; i <= sizeSig; i++ {
+ raw = append(raw, uint32(i))
+ }
+ inp.sign = signal.FromRaw(raw, 0)
+ inp.sig = hash.Hash(inp.p.Serialize())
+ return
+}
+
+func getTarget(t *testing.T, os, arch string) *prog.Target {
+ t.Parallel()
+ target, err := prog.GetTarget(os, arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return target
+}
diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go
new file mode 100644
index 000000000..64be8325c
--- /dev/null
+++ b/pkg/fuzzer/fuzzer.go
@@ -0,0 +1,350 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/google/syzkaller/pkg/hash"
+ "github.com/google/syzkaller/pkg/ipc"
+ "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/prog"
+)
+
// Fuzzer is a single fuzzing session: it owns the corpus, produces
// execution requests (NextInput) and consumes their results (Done).
// It scales with however many executors call NextInput concurrently.
type Fuzzer struct {
	Config         *Config
	Corpus         *Corpus
	NeedCandidates chan struct{} // signaled (once per cycle) when the candidate queue runs low

	ctx    context.Context
	mu     sync.Mutex // protects stats, rnd and runningExecs
	stats  map[string]uint64
	rnd    *rand.Rand
	target *prog.Target

	ct           *prog.ChoiceTable
	ctProgs      int // corpus size at the time ct was built
	ctMu         sync.Mutex // TODO: use RWLock.
	ctRegenerate chan struct{}

	nextExec     *priorityQueue[*Request]
	runningExecs map[*Request]time.Time // in-flight requests and their start times
	nextJobID    atomic.Int64

	queuedCandidates atomic.Int64
	// If the source of candidates runs out of them, we risk
	// generating too many needCandidate requests (one for
	// each Config.MinCandidates). We prevent this with candidatesRequested.
	candidatesRequested atomic.Bool
}
+
+func NewFuzzer(ctx context.Context, cfg *Config, rnd *rand.Rand,
+ target *prog.Target) *Fuzzer {
+ f := &Fuzzer{
+ Config: cfg,
+ Corpus: newCorpus(),
+ NeedCandidates: make(chan struct{}, 1),
+
+ ctx: ctx,
+ stats: map[string]uint64{},
+ rnd: rnd,
+ target: target,
+
+ // We're okay to lose some of the messages -- if we are already
+ // regenerating the table, we don't want to repeat it right away.
+ ctRegenerate: make(chan struct{}),
+
+ nextExec: makePriorityQueue[*Request](),
+ runningExecs: map[*Request]time.Time{},
+ }
+ f.updateChoiceTable(nil)
+ go f.choiceTableUpdater()
+ if cfg.Debug {
+ go f.leakDetector()
+ go f.logCurrentStats()
+ }
+ return f
+}
+
// Config holds the static knobs of a fuzzing session.
type Config struct {
	Debug          bool // enables the leak detector and periodic stats logging
	Logf           func(level int, msg string, args ...interface{})
	Coverage       bool // whether real coverage signal is collected
	FaultInjection bool // run the fault-injection stage after smashing
	Comparisons    bool // collect comparison operands and run hints jobs
	Collide        bool // additionally run collided variants of smashed programs
	EnabledCalls   map[*prog.Syscall]bool
	NoMutateCalls  map[int]bool // call IDs that must not be mutated
	LeakChecking   bool
	FetchRawCover  bool
	// If the number of queued candidates is less than MinCandidates,
	// NeedCandidates is triggered.
	MinCandidates uint
	NewInputs     chan rpctype.Input // receives every input added to the corpus; may be nil
}
+
// Request is a single program-execution request produced by the fuzzer.
// The executor performs the run, fills in the requested pieces
// (signal/cover/hints) and hands the outcome back via Fuzzer.Done.
type Request struct {
	Prog         *prog.Prog
	NeedCover    bool
	NeedRawCover bool
	NeedSignal   bool
	NeedHints    bool // mutually exclusive with NeedCover/NeedSignal (enforced in pushExec)
	// Fields that are only relevant within pkg/fuzzer.
	flags   ProgTypes
	stat    string       // stat counter bumped when the request completes
	result  *Result
	resultC chan *Result // non-nil when a job waits synchronously for this result
}

// Result is the outcome of executing a Request. Stop indicates that the
// session is shutting down and no result could be obtained.
type Result struct {
	Info *ipc.ProgInfo
	Stop bool
}
+
// Done must be called exactly once for every Request obtained from
// NextInput. It triages the per-call results, unblocks any job waiting
// on the request and updates the stats.
func (fuzzer *Fuzzer) Done(req *Request, res *Result) {
	// Triage individual calls.
	// We do it before unblocking the waiting threads because
	// it may result in concurrent modification of req.Prog.
	if req.NeedSignal && res.Info != nil {
		for call, info := range res.Info.Calls {
			fuzzer.triageProgCall(req.Prog, &info, call, req.flags)
		}
		// call == -1 denotes the extra (not call-attributed) signal.
		fuzzer.triageProgCall(req.Prog, &res.Info.Extra, -1, req.flags)
	}
	// Unblock threads that wait for the result.
	req.result = res
	if req.resultC != nil {
		req.resultC <- res
	}
	// Update stats.
	fuzzer.mu.Lock()
	fuzzer.stats[req.stat]++
	delete(fuzzer.runningExecs, req)
	fuzzer.mu.Unlock()
}
+
+func (fuzzer *Fuzzer) triageProgCall(p *prog.Prog, info *ipc.CallInfo, call int,
+ flags ProgTypes) {
+ prio := signalPrio(p, info, call)
+ newMaxSignal := fuzzer.Corpus.AddRawMaxSignal(info.Signal, prio)
+ if newMaxSignal.Empty() {
+ return
+ }
+ fuzzer.Logf(2, "found new signal in call %d in %s", call, p)
+ fuzzer.startJob(&triageJob{
+ p: p.Clone(),
+ call: call,
+ info: *info,
+ newSignal: newMaxSignal,
+ flags: flags,
+ jobPriority: triageJobPrio(flags),
+ })
+}
+
+func signalPrio(p *prog.Prog, info *ipc.CallInfo, call int) (prio uint8) {
+ if call == -1 {
+ return 0
+ }
+ if info.Errno == 0 {
+ prio |= 1 << 1
+ }
+ if !p.Target.CallContainsAny(p.Calls[call]) {
+ prio |= 1 << 0
+ }
+ return
+}
+
// Candidate is a program queued from outside the fuzzing loop (via
// AddCandidates) to be executed and, if still interesting, triaged.
type Candidate struct {
	Prog      *prog.Prog
	Hash      hash.Sig
	Smashed   bool // the smash stage was already performed; skip it during triage
	Minimized bool // the program is already minimal; skip minimization
}
+
+func (fuzzer *Fuzzer) NextInput() *Request {
+ req := fuzzer.nextInput()
+ fuzzer.mu.Lock()
+ fuzzer.runningExecs[req] = time.Now()
+ fuzzer.mu.Unlock()
+ if req.stat == statCandidate {
+ if fuzzer.queuedCandidates.Add(-1) < 0 {
+ panic("queuedCandidates is out of sync")
+ }
+ }
+ if fuzzer.NeedCandidatesNow() &&
+ !fuzzer.candidatesRequested.CompareAndSwap(false, true) {
+ select {
+ case fuzzer.NeedCandidates <- struct{}{}:
+ default:
+ }
+ }
+ return req
+}
+
+func (fuzzer *Fuzzer) nextInput() *Request {
+ nextExec := fuzzer.nextExec.tryPop()
+ if nextExec != nil {
+ return nextExec.value
+ }
+ // Either generate a new input or mutate an existing one.
+ mutateRate := 0.95
+ if !fuzzer.Config.Coverage {
+ // If we don't have real coverage signal, generate programs
+ // more frequently because fallback signal is weak.
+ mutateRate = 0.5
+ }
+ rnd := fuzzer.rand()
+ if rnd.Float64() < mutateRate {
+ req := mutateProgRequest(fuzzer, rnd)
+ if req != nil {
+ return req
+ }
+ }
+ return genProgRequest(fuzzer, rnd)
+}
+
+func (fuzzer *Fuzzer) startJob(newJob job) {
+ fuzzer.Logf(2, "started %T", newJob)
+ newJob.saveID(-fuzzer.nextJobID.Add(1))
+ go newJob.run(fuzzer)
+}
+
+func (fuzzer *Fuzzer) Logf(level int, msg string, args ...interface{}) {
+ if fuzzer.Config.Logf == nil {
+ return
+ }
+ fuzzer.Config.Logf(level, msg, args...)
+}
+
+func (fuzzer *Fuzzer) NeedCandidatesNow() bool {
+ return fuzzer.queuedCandidates.Load() < int64(fuzzer.Config.MinCandidates)
+}
+
+func (fuzzer *Fuzzer) AddCandidates(candidates []Candidate) {
+ fuzzer.queuedCandidates.Add(int64(len(candidates)))
+ for _, candidate := range candidates {
+ fuzzer.pushExec(candidateRequest(candidate), priority{candidatePrio})
+ }
+ fuzzer.candidatesRequested.Store(false)
+}
+
+func (fuzzer *Fuzzer) rand() *rand.Rand {
+ fuzzer.mu.Lock()
+ seed := fuzzer.rnd.Int63()
+ fuzzer.mu.Unlock()
+ return rand.New(rand.NewSource(seed))
+}
+
+func (fuzzer *Fuzzer) pushExec(req *Request, prio priority) {
+ if req.stat == "" {
+ panic("Request.Stat field must be set")
+ }
+ if req.NeedHints && (req.NeedCover || req.NeedSignal) {
+ panic("Request.NeedHints is mutually exclusive with other fields")
+ }
+ fuzzer.nextExec.push(&priorityQueueItem[*Request]{
+ value: req, prio: prio,
+ })
+}
+
// exec synchronously executes req on behalf of job: it enqueues the
// request with the job's priority and blocks until Done delivers the
// result or the fuzzer context is cancelled.
func (fuzzer *Fuzzer) exec(job job, req *Request) *Result {
	// Buffered so that Done never blocks on the send, even if we bail
	// out on ctx cancellation below and nobody ever reads the channel.
	req.resultC = make(chan *Result, 1)
	fuzzer.pushExec(req, job.priority())
	select {
	case <-fuzzer.ctx.Done():
		return &Result{Stop: true}
	case res := <-req.resultC:
		close(req.resultC)
		return res
	}
}
+
+func (fuzzer *Fuzzer) leakDetector() {
+ const timeout = 20 * time.Minute
+ ticket := time.NewTicker(timeout)
+ defer ticket.Stop()
+ for {
+ select {
+ case now := <-ticket.C:
+ fuzzer.mu.Lock()
+ for req, startedTime := range fuzzer.runningExecs {
+ if now.Sub(startedTime) > timeout {
+ panic(fmt.Sprintf("execution timed out: %v", req))
+ }
+ }
+ fuzzer.mu.Unlock()
+ case <-fuzzer.ctx.Done():
+ return
+ }
+ }
+}
+
+func (fuzzer *Fuzzer) updateChoiceTable(programs []*prog.Prog) {
+ newCt := fuzzer.target.BuildChoiceTable(programs, fuzzer.Config.EnabledCalls)
+
+ fuzzer.ctMu.Lock()
+ defer fuzzer.ctMu.Unlock()
+ if len(programs) >= fuzzer.ctProgs {
+ fuzzer.ctProgs = len(programs)
+ fuzzer.ct = newCt
+ }
+}
+
+func (fuzzer *Fuzzer) choiceTableUpdater() {
+ for {
+ select {
+ case <-fuzzer.ctx.Done():
+ return
+ case <-fuzzer.ctRegenerate:
+ }
+ fuzzer.updateChoiceTable(fuzzer.Corpus.Programs())
+ }
+}
+
// ChoiceTable returns the current syscall choice table. As a side effect
// it nudges the background updater to regenerate the table once the
// corpus has grown sufficiently since the last build.
func (fuzzer *Fuzzer) ChoiceTable() *prog.ChoiceTable {
	progs := fuzzer.Corpus.Programs()

	fuzzer.ctMu.Lock()
	defer fuzzer.ctMu.Unlock()

	// There were no deep ideas nor any calculations behind these numbers.
	regenerateEveryProgs := 333
	if len(progs) < 100 {
		regenerateEveryProgs = 33
	}
	if fuzzer.ctProgs+regenerateEveryProgs < len(progs) {
		// Non-blocking send: the updater goroutine picks this up.
		select {
		case fuzzer.ctRegenerate <- struct{}{}:
		default:
			// We're okay to lose the message.
			// It means that we're already regenerating the table.
		}
	}
	return fuzzer.ct
}
+
+func (fuzzer *Fuzzer) logCurrentStats() {
+ for {
+ select {
+ case <-time.After(time.Minute):
+ case <-fuzzer.ctx.Done():
+ return
+ }
+
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+
+ fuzzer.mu.Lock()
+ str := fmt.Sprintf("exec queue size: %d, running execs: %d, heap (MB): %d",
+ fuzzer.nextExec.Len(), len(fuzzer.runningExecs), m.Alloc/1000/1000)
+ fuzzer.mu.Unlock()
+ fuzzer.Logf(0, "%s", str)
+ }
+}
diff --git a/pkg/fuzzer/fuzzer_test.go b/pkg/fuzzer/fuzzer_test.go
new file mode 100644
index 000000000..1896b2b84
--- /dev/null
+++ b/pkg/fuzzer/fuzzer_test.go
@@ -0,0 +1,290 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "hash/crc32"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/pkg/csource"
+ "github.com/google/syzkaller/pkg/ipc"
+ "github.com/google/syzkaller/pkg/ipc/ipcconfig"
+ "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/sync/errgroup"
+)
+
// TestFuzz runs a real end-to-end fuzzing session against the test-OS
// executor. The only enabled call is syz_test_fuzzer1, which fakes two
// crashes for specific argument combinations; the test succeeds once both
// crash titles ("first bug" and "second bug") have been observed.
func TestFuzz(t *testing.T) {
	defer checkGoroutineLeaks()

	target, err := prog.GetTarget(targets.TestOS, targets.TestArch64Fuzz)
	if err != nil {
		t.Fatal(err)
	}
	executor := buildExecutor(t, target)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fuzzer := NewFuzzer(ctx, &Config{
		Debug: true,
		Logf: func(level int, msg string, args ...interface{}) {
			if level > 1 {
				return // keep the test log readable
			}
			t.Logf(msg, args...)
		},
		Coverage: true,
		EnabledCalls: map[*prog.Syscall]bool{
			target.SyscallMap["syz_test_fuzzer1"]: true,
		},
		NewInputs: make(chan rpctype.Input),
	}, rand.New(testutil.RandSource(t)), target)

	// Drain NewInputs: the channel is unbuffered, so triage jobs would
	// otherwise block on the send until ctx is cancelled.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case c := <-fuzzer.Config.NewInputs:
				t.Logf("new prog:\n%s", c.Prog)
			}
		}
	}()

	tf := newTestFuzzer(t, fuzzer, map[string]bool{
		"first bug":  true,
		"second bug": true,
	}, 10000)

	// Fuzz with two executor processes in parallel.
	for i := 0; i < 2; i++ {
		tf.registerExecutor(newProc(t, target, executor))
	}
	tf.wait()

	t.Logf("resulting corpus:")
	for _, p := range fuzzer.Corpus.Programs() {
		t.Logf("-----")
		t.Logf("%s", p.Serialize())
	}

	assert.Equal(t, len(tf.expectedCrashes), len(tf.crashes),
		"not all expected crashes were found")
}
+
+func BenchmarkFuzzer(b *testing.B) {
+ b.ReportAllocs()
+ target, err := prog.GetTarget(targets.TestOS, targets.TestArch64Fuzz)
+ if err != nil {
+ b.Fatal(err)
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ calls := map[*prog.Syscall]bool{}
+ for _, c := range target.Syscalls {
+ calls[c] = true
+ }
+ fuzzer := NewFuzzer(ctx, &Config{
+ Coverage: true,
+ EnabledCalls: calls,
+ }, rand.New(rand.NewSource(time.Now().UnixNano())), target)
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ req := fuzzer.NextInput()
+ res, _, _ := emulateExec(req)
+ fuzzer.Done(req, res)
+ }
+ })
+}
+
+// Based on the example from Go documentation.
+var crc32q = crc32.MakeTable(0xD5828281)
+
+func emulateExec(req *Request) (*Result, string, error) {
+ serializedLines := bytes.Split(req.Prog.Serialize(), []byte("\n"))
+ var info ipc.ProgInfo
+ for i, call := range req.Prog.Calls {
+ cover := uint32(call.Meta.ID*1024) +
+ crc32.Checksum(serializedLines[i], crc32q)%4
+ callInfo := ipc.CallInfo{}
+ if req.NeedCover {
+ callInfo.Cover = []uint32{cover}
+ }
+ if req.NeedSignal {
+ callInfo.Signal = []uint32{cover}
+ }
+ info.Calls = append(info.Calls, callInfo)
+ }
+ return &Result{Info: &info}, "", nil
+}
+
// testFuzzer drives a Fuzzer with real executor processes until either
// iterLimit is reached or all expected crash titles have been observed.
type testFuzzer struct {
	t               testing.TB
	eg              errgroup.Group
	fuzzer          *Fuzzer
	mu              sync.Mutex // protects crashes and iter
	crashes         map[string]int
	expectedCrashes map[string]bool
	iter            int
	iterLimit       int
}

// newTestFuzzer wraps fuzzer for the test. expectedCrashes may be nil, in
// which case the loop only stops at iterLimit.
func newTestFuzzer(t testing.TB, fuzzer *Fuzzer, expectedCrashes map[string]bool, iterLimit int) *testFuzzer {
	return &testFuzzer{
		t:               t,
		fuzzer:          fuzzer,
		expectedCrashes: expectedCrashes,
		crashes:         map[string]int{},
		iterLimit:       iterLimit,
	}
}
+
+func (f *testFuzzer) oneMore() bool {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.iter++
+ if f.iter%100 == 0 {
+ stat := f.fuzzer.Corpus.Stat()
+ f.t.Logf("<iter %d>: corpus %d, signal %d, max signal %d, crash types %d",
+ f.iter, stat.Progs, stat.Signal, stat.MaxSignal, len(f.crashes))
+ }
+ return f.iter < f.iterLimit &&
+ (f.expectedCrashes == nil || len(f.crashes) != len(f.expectedCrashes))
+}
+
// registerExecutor starts a fuzzing loop over proc in the shared errgroup.
// Fake crashes with expected titles are counted; an unexpected title
// aborts the whole run with an error.
func (f *testFuzzer) registerExecutor(proc *executorProc) {
	f.eg.Go(func() error {
		for f.oneMore() {
			req := f.fuzzer.NextInput()
			res, crash, err := proc.execute(req)
			if err != nil {
				return err
			}
			if crash != "" {
				// The execution "crashed": report Stop so the fuzzer does
				// not try to triage a nonexistent result.
				res = &Result{Stop: true}
				if !f.expectedCrashes[crash] {
					return fmt.Errorf("unexpected crash: %q", crash)
				}
				f.mu.Lock()
				f.t.Logf("CRASH: %s", crash)
				f.crashes[crash]++
				f.mu.Unlock()
			}
			f.fuzzer.Done(req, res)
		}
		return nil
	})
}
+
+func (f *testFuzzer) wait() {
+ t := f.t
+ err := f.eg.Wait()
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("crashes:")
+ for title, cnt := range f.crashes {
+ t.Logf("%s: %d", title, cnt)
+ }
+ t.Logf("stats:\n%v", f.fuzzer.GrabStats())
+}
+
// TODO: it's already implemented in syz-fuzzer/proc.go,
// pkg/runtest and tools/syz-execprog.
// Looks like it's time to factor out this functionality.

// executorProc wraps a single ipc.Env (one executor process) together
// with its base execution options.
type executorProc struct {
	env      *ipc.Env
	execOpts ipc.ExecOpts
}
+
+func newProc(t *testing.T, target *prog.Target, executor string) *executorProc {
+ config, execOpts, err := ipcconfig.Default(target)
+ if err != nil {
+ t.Fatal(err)
+ }
+ config.Executor = executor
+ config.Flags |= ipc.FlagSignal
+ env, err := ipc.MakeEnv(config, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() { env.Close() })
+ return &executorProc{
+ env: env,
+ execOpts: *execOpts,
+ }
+}
+
+var crashRe = regexp.MustCompile(`{{CRASH: (.*?)}}`)
+
+func (proc *executorProc) execute(req *Request) (*Result, string, error) {
+ execOpts := proc.execOpts
+ // TODO: it's duplicated from fuzzer.go.
+ if req.NeedSignal {
+ execOpts.Flags |= ipc.FlagCollectSignal
+ }
+ if req.NeedCover {
+ execOpts.Flags |= ipc.FlagCollectCover
+ }
+ // TODO: support req.NeedHints.
+ output, info, _, err := proc.env.Exec(&execOpts, req.Prog)
+ ret := crashRe.FindStringSubmatch(string(output))
+ if ret != nil {
+ return nil, ret[1], nil
+ } else if err != nil {
+ return nil, "", err
+ }
+ return &Result{Info: info}, "", nil
+}
+
+func buildExecutor(t *testing.T, target *prog.Target) string {
+ executor, err := csource.BuildFile(target,
+ filepath.FromSlash("../../executor/executor.cc"),
+ "-fsanitize-coverage=trace-pc", "-g",
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() { os.Remove(executor) })
+ return executor
+}
+
// checkGoroutineLeaks panics if, after a few grace periods, any goroutine
// still has pkg/fuzzer/fuzzer.go frames on its stack.
// Inspired by src/net/http/main_test.go.
func checkGoroutineLeaks() {
	buf := make([]byte, 2<<20)
	leaked := ""
	for attempt := 0; attempt < 3; attempt++ {
		buf = buf[:runtime.Stack(buf, true)]
		leaked = ""
		for _, g := range strings.Split(string(buf), "\n\n") {
			if strings.Contains(g, "pkg/fuzzer/fuzzer.go") {
				leaked = fmt.Sprintf("%sLeaked goroutine:\n%s", leaked, g)
			}
		}
		if leaked == "" {
			return
		}
		// Give ctx.Done() a chance to propagate to all goroutines.
		time.Sleep(100 * time.Millisecond)
	}
	if leaked != "" {
		panic(leaked)
	}
}
diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go
new file mode 100644
index 000000000..5a8f610b0
--- /dev/null
+++ b/pkg/fuzzer/job.go
@@ -0,0 +1,401 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "fmt"
+ "math/rand"
+
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/hash"
+ "github.com/google/syzkaller/pkg/ipc"
+ "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/prog"
+)
+
// Base priorities of execution requests, from lowest to highest
// (priority.greaterThan prefers larger values).
const (
	smashPrio int64 = iota + 1
	genPrio
	triagePrio
	candidatePrio
	candidateTriagePrio
)
+
// job is a multi-step unit of fuzzing work (triage, smash, hints) that
// runs in its own goroutine and schedules executions via Fuzzer.exec.
type job interface {
	run(fuzzer *Fuzzer)
	saveID(id int64)
	priority() priority
}
+
// ProgTypes is a bit mask describing the origin and processing state of a
// program as it flows through the pipeline.
type ProgTypes int

const (
	ProgCandidate ProgTypes = 1 << iota // the program arrived via AddCandidates
	ProgMinimized                       // minimization was already performed
	ProgSmashed                         // the smash stage was already performed
	ProgNormal ProgTypes = 0
)
+
// jobPriority is embedded by jobs to carry their scheduling priority.
type jobPriority struct {
	prio priority
}

// newJobPriority creates a priority with the given base value, reserving
// room for the per-job sub-priority appended later by saveID.
func newJobPriority(base int64) jobPriority {
	prio := append(make(priority, 0, 2), base)
	return jobPriority{prio}
}

func (jp jobPriority) priority() priority {
	return jp.prio
}

// If we prioritize execution requests only by the base priorities of their origin
// jobs, we risk letting 1000s of simultaneous jobs slowly progress in parallel.
// It's better to let same-prio jobs that were started earlier finish first.
// saveID() allows Fuzzer to attach this sub-prio at the moment of job creation.
func (jp *jobPriority) saveID(id int64) {
	jp.prio = append(jp.prio, id)
}
+
+func genProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *Request {
+ p := fuzzer.target.Generate(rnd,
+ prog.RecommendedCalls,
+ fuzzer.ChoiceTable())
+ return &Request{
+ Prog: p,
+ NeedSignal: true,
+ stat: statGenerate,
+ }
+}
+
+func mutateProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *Request {
+ p := fuzzer.Corpus.chooseProgram(rnd)
+ if p == nil {
+ return nil
+ }
+ newP := p.Clone()
+ newP.Mutate(rnd,
+ prog.RecommendedCalls,
+ fuzzer.ChoiceTable(),
+ fuzzer.Config.NoMutateCalls,
+ fuzzer.Corpus.Programs(),
+ )
+ return &Request{
+ Prog: newP,
+ NeedSignal: true,
+ stat: statFuzz,
+ }
+}
+
+func candidateRequest(input Candidate) *Request {
+ flags := ProgCandidate
+ if input.Minimized {
+ flags |= ProgMinimized
+ }
+ if input.Smashed {
+ flags |= ProgSmashed
+ }
+ return &Request{
+ Prog: input.Prog,
+ NeedSignal: true,
+ stat: statCandidate,
+ flags: flags,
+ }
+}
+
// triageJob are programs for which we noticed potential new coverage during
// first execution. But we are not sure yet if the coverage is real or not.
// During triage we understand if these programs in fact give new coverage,
// and if yes, minimize them and add to corpus.
type triageJob struct {
	p         *prog.Prog
	call      int           // call that produced the new signal; -1 means the extra signal
	info      ipc.CallInfo  // the call's info from the run that discovered the signal
	newSignal signal.Signal // the potentially-new signal to verify
	flags     ProgTypes
	jobPriority
}
+
+func triageJobPrio(flags ProgTypes) jobPriority {
+ if flags&ProgCandidate > 0 {
+ return newJobPriority(candidateTriagePrio)
+ }
+ return newJobPriority(triagePrio)
+}
+
// run deflakes the program, minimizes it (unless already minimized),
// saves it to the corpus, schedules a smash job (unless already smashed)
// and publishes the new input to Config.NewInputs when that channel is set.
func (job *triageJob) run(fuzzer *Fuzzer) {
	callName := ".extra"
	logCallName := "extra"
	if job.call != -1 {
		callName = job.p.Calls[job.call].Meta.Name
		logCallName = fmt.Sprintf("call #%v %v", job.call, callName)
	}
	fuzzer.Logf(3, "triaging input for %v (new signal=%v)", logCallName, job.newSignal.Len())
	// Compute input coverage and non-flaky signal for minimization.
	info, stop := job.deflake(fuzzer)
	if stop || info.newStableSignal.Empty() {
		// Either shutting down, or none of the new signal survived re-execution.
		return
	}
	if job.flags&ProgMinimized == 0 {
		stop = job.minimize(fuzzer, info.newStableSignal)
		if stop {
			return
		}
	}
	data := job.p.Serialize()
	fuzzer.Logf(2, "added new input for %q to the corpus:\n%s",
		logCallName, string(data))
	if job.flags&ProgSmashed == 0 {
		fuzzer.startJob(&smashJob{
			p:           job.p.Clone(),
			call:        job.call,
			jobPriority: newJobPriority(smashPrio),
		})
	}
	fuzzer.Corpus.Save(job.p, info.stableSignal, hash.Hash(data))
	if fuzzer.Config.NewInputs != nil {
		// Blocking send; bail out if the fuzzer is shutting down.
		select {
		case <-fuzzer.ctx.Done():
		case fuzzer.Config.NewInputs <- rpctype.Input{
			Call:     callName,
			CallID:   job.call,
			Prog:     data,
			Signal:   info.stableSignal.Serialize(),
			Cover:    info.cover.Serialize(),
			RawCover: info.rawCover,
		}:
		}
	}
}
+
// deflakedCover is the outcome of triageJob.deflake: the signal reproduced
// across runs, the subset of the job's new signal that proved stable, and
// the merged coverage.
type deflakedCover struct {
	stableSignal    signal.Signal
	newStableSignal signal.Signal
	cover           cover.Cover
	rawCover        []uint32
}
+
// deflake re-executes the program up to signalRuns times and intersects
// the observed signal across the runs to filter out flaky signal. It
// stops early when none of the candidate new signal remains, or when the
// call failed to (re-)execute in the majority of runs.
func (job *triageJob) deflake(fuzzer *Fuzzer) (info deflakedCover, stop bool) {
	const signalRuns = 3
	var notExecuted int
	for i := 0; i < signalRuns; i++ {
		result := fuzzer.exec(job, &Request{
			Prog:         job.p,
			NeedSignal:   true,
			NeedCover:    true,
			NeedRawCover: fuzzer.Config.FetchRawCover,
			stat:         statTriage,
		})
		if result.Stop {
			stop = true
			return
		}
		if !reexecutionSuccess(result.Info, &job.info, job.call) {
			// The call was not executed or failed.
			notExecuted++
			if notExecuted >= signalRuns/2+1 {
				stop = true
				return // if happens too often, give up
			}
			continue
		}
		thisSignal, thisCover := getSignalAndCover(job.p, result.Info, job.call)
		if len(info.rawCover) == 0 && fuzzer.Config.FetchRawCover {
			// Keep raw cover from the first successful run only.
			info.rawCover = thisCover
		}
		if i == 0 {
			info.stableSignal = thisSignal
			info.newStableSignal = job.newSignal.Intersection(thisSignal)
		} else {
			info.stableSignal = info.stableSignal.Intersection(thisSignal)
			info.newStableSignal = info.newStableSignal.Intersection(thisSignal)
		}
		if info.newStableSignal.Empty() {
			// None of the new signal is stable; no point in more runs.
			return
		}
		info.cover.Merge(thisCover)
	}
	return
}
+
// minimize shrinks job.p to a smaller program that still produces all of
// newSignal in at least one of minimizeAttempts runs per simplification.
// It returns true when the fuzzer is shutting down.
func (job *triageJob) minimize(fuzzer *Fuzzer, newSignal signal.Signal) (stop bool) {
	const minimizeAttempts = 3
	job.p, job.call = prog.Minimize(job.p, job.call, false,
		func(p1 *prog.Prog, call1 int) bool {
			if stop {
				// Shutting down: reject every simplification so that
				// prog.Minimize finishes as fast as possible.
				return false
			}
			for i := 0; i < minimizeAttempts; i++ {
				result := fuzzer.exec(job, &Request{
					Prog:       p1,
					NeedSignal: true,
					stat:       statMinimize,
				})
				if result.Stop {
					stop = true
					return false
				}
				info := result.Info
				if !reexecutionSuccess(info, &job.info, call1) {
					// The call was not executed or failed.
					continue
				}
				thisSignal, _ := getSignalAndCover(p1, info, call1)
				// Accept the simplification only if it retains ALL of the new signal.
				if newSignal.Intersection(thisSignal).Len() == newSignal.Len() {
					return true
				}
			}
			return false
		})
	return stop
}
+
+func reexecutionSuccess(info *ipc.ProgInfo, oldInfo *ipc.CallInfo, call int) bool {
+ if info == nil || len(info.Calls) == 0 {
+ return false
+ }
+ if call != -1 {
+ // Don't minimize calls from successful to unsuccessful.
+ // Successful calls are much more valuable.
+ if oldInfo.Errno == 0 && info.Calls[call].Errno != 0 {
+ return false
+ }
+ return len(info.Calls[call].Signal) != 0
+ }
+ return len(info.Extra.Signal) != 0
+}
+
+func getSignalAndCover(p *prog.Prog, info *ipc.ProgInfo, call int) (signal.Signal, []uint32) {
+ inf := &info.Extra
+ if call != -1 {
+ inf = &info.Calls[call]
+ }
+ return signal.FromRaw(inf.Signal, signalPrio(p, inf, call)), inf.Cover
+}
+
// smashJob aggressively mutates an already-triaged corpus program and,
// depending on the config, also schedules hints collection, collide runs
// and fault injection for it.
type smashJob struct {
	p    *prog.Prog
	call int // call that brought the program into the corpus; -1 for extra signal
	jobPriority
}
+
// run executes 100 mutants of the smashed program (plus an optional
// collided variant of each), kicks off a hints job when comparisons are
// enabled, and finishes with fault injection when enabled.
func (job *smashJob) run(fuzzer *Fuzzer) {
	fuzzer.Logf(2, "smashing the program %s (call=%d):", job.p, job.call)
	if fuzzer.Config.Comparisons && job.call >= 0 {
		fuzzer.startJob(&hintsJob{
			p:           job.p.Clone(),
			call:        job.call,
			jobPriority: newJobPriority(smashPrio),
		})
	}

	const iters = 100
	rnd := fuzzer.rand()
	for i := 0; i < iters; i++ {
		p := job.p.Clone()
		p.Mutate(rnd, prog.RecommendedCalls,
			fuzzer.ChoiceTable(),
			fuzzer.Config.NoMutateCalls,
			fuzzer.Corpus.Programs())
		result := fuzzer.exec(job, &Request{
			Prog:       p,
			NeedSignal: true,
			stat:       statSmash,
		})
		if result.Stop {
			return
		}
		if fuzzer.Config.Collide {
			// Also run a collided variant of the mutant (no signal requested).
			result := fuzzer.exec(job, &Request{
				Prog: randomCollide(p, rnd),
				stat: statCollide,
			})
			if result.Stop {
				return
			}
		}
	}
	if fuzzer.Config.FaultInjection && job.call >= 0 {
		job.faultInjection(fuzzer)
	}
}
+
+func randomCollide(origP *prog.Prog, rnd *rand.Rand) *prog.Prog {
+ if rnd.Intn(5) == 0 {
+ // Old-style collide with a 20% probability.
+ p, err := prog.DoubleExecCollide(origP, rnd)
+ if err == nil {
+ return p
+ }
+ }
+ if rnd.Intn(4) == 0 {
+ // Duplicate random calls with a 20% probability (25% * 80%).
+ p, err := prog.DupCallCollide(origP, rnd)
+ if err == nil {
+ return p
+ }
+ }
+ p := prog.AssignRandomAsync(origP, rnd)
+ if rnd.Intn(2) != 0 {
+ prog.AssignRandomRerun(p, rnd)
+ }
+ return p
+}
+
+func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
+ for nth := 1; nth <= 100; nth++ {
+ fuzzer.Logf(2, "injecting fault into call %v, step %v",
+ job.call, nth)
+ newProg := job.p.Clone()
+ newProg.Calls[job.call].Props.FailNth = nth
+ result := fuzzer.exec(job, &Request{
+ Prog: job.p,
+ stat: statSmash,
+ })
+ if result.Stop {
+ return
+ }
+ info := result.Info
+ if info != nil && len(info.Calls) > job.call &&
+ info.Calls[job.call].Flags&ipc.CallFaultInjected == 0 {
+ break
+ }
+ }
+}
+
// hintsJob mutates a program based on the comparison operands observed
// during its execution (see prog.MutateWithHints).
type hintsJob struct {
	p    *prog.Prog
	call int // the call whose comparisons are collected and mutated
	jobPriority
}
+
// run implements the hints workflow: execute the program once collecting
// comparison operands, then execute every hint-derived mutant.
func (job *hintsJob) run(fuzzer *Fuzzer) {
	// First execute the original program to dump comparisons from KCOV.
	p := job.p
	result := fuzzer.exec(job, &Request{
		Prog:      p,
		NeedHints: true,
		stat:      statSeed,
	})
	if result.Stop || result.Info == nil {
		return
	}
	// Then mutate the initial program for every match between
	// a syscall argument and a comparison operand.
	// Execute each of such mutants to check if it gives new coverage.
	var stop bool
	p.MutateWithHints(job.call, result.Info.Calls[job.call].Comps,
		func(p *prog.Prog) {
			if stop {
				// Shutting down; skip the remaining mutants.
				return
			}
			result := fuzzer.exec(job, &Request{
				Prog:       p,
				NeedSignal: true,
				stat:       statHint,
			})
			stop = stop || result.Stop
		})
}
diff --git a/pkg/fuzzer/prio_queue.go b/pkg/fuzzer/prio_queue.go
new file mode 100644
index 000000000..ea8f448c9
--- /dev/null
+++ b/pkg/fuzzer/prio_queue.go
@@ -0,0 +1,100 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "container/heap"
+ "sync"
+)
+
// priority is a multi-component job priority, compared lexicographically:
// the first component dominates and later ones only break ties.
type priority []int64

// greaterThan reports whether p is strictly greater than other.
// If one priority is a prefix of the other, the longer one wins.
func (p priority) greaterThan(other priority) bool {
	for i, v := range p {
		if i >= len(other) || v > other[i] {
			return true
		}
		if v < other[i] {
			return false
		}
	}
	// p is equal to (or a prefix of) other.
	return false
}
+
+type priorityQueue[T any] struct {
+ impl priorityQueueImpl[T]
+ c *sync.Cond
+}
+
+func makePriorityQueue[T any]() *priorityQueue[T] {
+ return &priorityQueue[T]{
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (pq *priorityQueue[T]) Len() int {
+ pq.c.L.Lock()
+ defer pq.c.L.Unlock()
+ return pq.impl.Len()
+}
+
+func (pq *priorityQueue[T]) push(item *priorityQueueItem[T]) {
+ pq.c.L.Lock()
+ defer pq.c.L.Unlock()
+ heap.Push(&pq.impl, item)
+ pq.c.Signal()
+}
+
+// pop() blocks until there's input.
+func (pq *priorityQueue[T]) pop() *priorityQueueItem[T] {
+ pq.c.L.Lock()
+ defer pq.c.L.Unlock()
+ for pq.impl.Len() == 0 {
+ pq.c.Wait()
+ }
+ return heap.Pop(&pq.impl).(*priorityQueueItem[T])
+}
+
+func (pq *priorityQueue[T]) tryPop() *priorityQueueItem[T] {
+ pq.c.L.Lock()
+ defer pq.c.L.Unlock()
+ if len(pq.impl) == 0 {
+ return nil
+ }
+ return heap.Pop(&pq.impl).(*priorityQueueItem[T])
+}
+
+// The implementation below is based on the example provided
+// by https://pkg.go.dev/container/heap.
+
+type priorityQueueItem[T any] struct {
+ value T
+ prio priority
+}
+
+type priorityQueueImpl[T any] []*priorityQueueItem[T]
+
+func (pq priorityQueueImpl[T]) Len() int { return len(pq) }
+
+func (pq priorityQueueImpl[T]) Less(i, j int) bool {
+ // We want Pop to give us the highest, not lowest,
+ // priority so we use greater than here.
+ return pq[i].prio.greaterThan(pq[j].prio)
+}
+
+func (pq priorityQueueImpl[T]) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *priorityQueueImpl[T]) Push(x any) {
+ *pq = append(*pq, x.(*priorityQueueItem[T]))
+}
+
+func (pq *priorityQueueImpl[T]) Pop() any {
+ n := len(*pq)
+ item := (*pq)[n-1]
+ (*pq)[n-1] = nil
+ *pq = (*pq)[:n-1]
+ return item
+}
diff --git a/pkg/fuzzer/prio_queue_test.go b/pkg/fuzzer/prio_queue_test.go
new file mode 100644
index 000000000..b2abdb01b
--- /dev/null
+++ b/pkg/fuzzer/prio_queue_test.go
@@ -0,0 +1,44 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPriority(t *testing.T) {
+ assert.True(t, priority{1, 2}.greaterThan(priority{1, 1}))
+ assert.True(t, priority{3, 2}.greaterThan(priority{2, 3}))
+ assert.True(t, priority{1, -5}.greaterThan(priority{1, -10}))
+}
+
+func TestPrioQueueOrder(t *testing.T) {
+ pq := makePriorityQueue[int]()
+ pq.push(&priorityQueueItem[int]{value: 1, prio: priority{1}})
+ pq.push(&priorityQueueItem[int]{value: 3, prio: priority{3}})
+ pq.push(&priorityQueueItem[int]{value: 2, prio: priority{2}})
+
+ assert.Equal(t, 3, pq.pop().value)
+ assert.Equal(t, 2, pq.pop().value)
+ assert.Equal(t, 1, pq.pop().value)
+ assert.Nil(t, pq.tryPop())
+}
+
+func TestPrioQueueWait(t *testing.T) {
+ var wg sync.WaitGroup
+ pq := makePriorityQueue[int]()
+ assert.Nil(t, pq.tryPop())
+
+ wg.Add(1)
+ go func() {
+ assert.Equal(t, 10, pq.pop().value)
+ wg.Done()
+ }()
+
+ pq.push(&priorityQueueItem[int]{value: 10, prio: priority{1}})
+ wg.Wait()
+}
diff --git a/pkg/fuzzer/stats.go b/pkg/fuzzer/stats.go
new file mode 100644
index 000000000..17bc6131c
--- /dev/null
+++ b/pkg/fuzzer/stats.go
@@ -0,0 +1,26 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package fuzzer
+
+// Names of the per-fuzzer statistics counters reported via GrabStats.
+// The "exec ..." counters classify executions by the reason they were
+// scheduled; statExecTotal counts all of them together.
+const (
+ statGenerate = "exec gen"
+ statFuzz = "exec fuzz"
+ statCandidate = "exec candidate"
+ statTriage = "exec triage"
+ statMinimize = "exec minimize"
+ statSmash = "exec smash"
+ statHint = "exec hints"
+ statSeed = "exec seeds"
+ statCollide = "exec collide"
+ statExecTotal = "exec total"
+ // statBufferTooSmall counts executions whose output did not fit
+ // into the output buffer.
+ statBufferTooSmall = "buffer too small"
+)
+
+func (fuzzer *Fuzzer) GrabStats() map[string]uint64 {
+ fuzzer.mu.Lock()
+ defer fuzzer.mu.Unlock()
+ ret := fuzzer.stats
+ fuzzer.stats = map[string]uint64{}
+ return ret
+}