aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-11-29 13:23:42 +0100
committerDmitry Vyukov <dvyukov@google.com>2017-11-29 14:36:51 +0100
commit34f2c2332bedc41c5d22eaa35a555720cae3a1c7 (patch)
treebeb6f2d3d1399fef2c4f9d511e9d5d0804609261
parent19d272a98fd5c705b0345798da1a3e3d5b37f91f (diff)
pkg/report: add Output to Report
Whole raw output is an indivisible part of Report; currently we always pass Output separately along with Report. Make Output a Report field. Then, put the whole Report into manager Crash and repro context and Result. There is little point in passing Report as a bunch of separate fields.
-rw-r--r--pkg/report/freebsd.go4
-rw-r--r--pkg/report/linux.go4
-rw-r--r--pkg/report/report.go2
-rw-r--r--pkg/repro/repro.go32
-rw-r--r--syz-ci/jobs.go4
-rw-r--r--syz-manager/manager.go106
-rw-r--r--tools/syz-crush/crush.go4
-rw-r--r--vm/vm.go34
8 files changed, 93 insertions, 97 deletions
diff --git a/pkg/report/freebsd.go b/pkg/report/freebsd.go
index 46efea5d9..dd9c8178d 100644
--- a/pkg/report/freebsd.go
+++ b/pkg/report/freebsd.go
@@ -34,7 +34,9 @@ func (ctx *freebsd) ContainsCrash(output []byte) bool {
}
func (ctx *freebsd) Parse(output []byte) *Report {
- rep := new(Report)
+ rep := &Report{
+ Output: output,
+ }
var oops *oops
for pos := 0; pos < len(output); {
next := bytes.IndexByte(output[pos:], '\n')
diff --git a/pkg/report/linux.go b/pkg/report/linux.go
index 3804a134e..0efab7c16 100644
--- a/pkg/report/linux.go
+++ b/pkg/report/linux.go
@@ -78,7 +78,9 @@ func (ctx *linux) ContainsCrash(output []byte) bool {
func (ctx *linux) Parse(output []byte) *Report {
output = ctx.ExtractConsoleOutput(output)
- rep := new(Report)
+ rep := &Report{
+ Output: output,
+ }
var oops *oops
var textPrefix [][]byte
textLines := 0
diff --git a/pkg/report/report.go b/pkg/report/report.go
index 272c359ab..813e8ef3c 100644
--- a/pkg/report/report.go
+++ b/pkg/report/report.go
@@ -34,6 +34,8 @@ type Report struct {
Title string
// Report contains whole oops text.
Report []byte
+ // Output contains whole raw kernel output.
+ Output []byte
// StartPos/EndPos denote region of output with oops message(s).
StartPos int
EndPos int
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go
index 35dd51628..1d9b51493 100644
--- a/pkg/repro/repro.go
+++ b/pkg/repro/repro.go
@@ -35,12 +35,9 @@ type Result struct {
Opts csource.Options
CRepro bool
Stats Stats
- // Title, Log and Report of the final crash that we reproduced.
+ // Information about the final crash that we reproduced.
// Can be different from what we started reproducing.
- Title string
- Log []byte
- Report []byte
- Corrupted bool
+ Report *report.Report
}
type context struct {
@@ -50,10 +47,7 @@ type context struct {
instances chan *instance
bootRequests chan int
stats Stats
- title string
- log []byte
- report []byte
- corrupted bool
+ report *report.Report
}
type instance struct {
@@ -153,10 +147,11 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPoo
return nil, err
}
if res != nil {
- ctx.reproLog(3, "repro crashed as (corrupted=%v):\n%s", ctx.corrupted, ctx.report)
+ ctx.reproLog(3, "repro crashed as (corrupted=%v):\n%s",
+ ctx.report.Corrupted, ctx.report.Report)
// Try to rerun the repro if the report is corrupted.
- for attempts := 0; ctx.corrupted && attempts < 3; attempts++ {
- ctx.reproLog(3, "report is corrupted, running repro again\n")
+ for attempts := 0; ctx.report.Corrupted && attempts < 3; attempts++ {
+ ctx.reproLog(3, "report is corrupted, running repro again")
if res.CRepro {
_, err = ctx.testCProg(res.Prog, res.Duration, res.Opts)
} else {
@@ -166,11 +161,9 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPoo
return nil, err
}
}
- ctx.reproLog(3, "final repro crashed as (corrupted=%v):\n%s", ctx.corrupted, ctx.report)
- res.Title = ctx.title
- res.Log = ctx.log
+ ctx.reproLog(3, "final repro crashed as (corrupted=%v):\n%s",
+ ctx.report.Corrupted, ctx.report.Report)
res.Report = ctx.report
- res.Corrupted = ctx.corrupted
res.Stats = ctx.stats
}
@@ -616,15 +609,12 @@ func (ctx *context) testImpl(inst *vm.Instance, command string, duration time.Du
if err != nil {
return false, fmt.Errorf("failed to run command in VM: %v", err)
}
- rep, output := vm.MonitorExecution(outc, errc, ctx.reporter, true)
+ rep := vm.MonitorExecution(outc, errc, ctx.reporter, true)
if rep == nil {
ctx.reproLog(2, "program did not crash")
return false, nil
}
- ctx.title = rep.Title
- ctx.log = output
- ctx.report = rep.Report
- ctx.corrupted = rep.Corrupted
+ ctx.report = rep
ctx.reproLog(2, "program crashed: %v", rep.Title)
return true, nil
}
diff --git a/syz-ci/jobs.go b/syz-ci/jobs.go
index 84b9998d6..5eb970db9 100644
--- a/syz-ci/jobs.go
+++ b/syz-ci/jobs.go
@@ -339,12 +339,12 @@ func (job *Job) testProgram(inst *vm.Instance, command string, reporter report.R
if err != nil {
return false, fmt.Errorf("failed to run binary in VM: %v", err)
}
- rep, output := vm.MonitorExecution(outc, errc, reporter, true)
+ rep := vm.MonitorExecution(outc, errc, reporter, true)
if rep == nil {
return false, nil
}
job.resp.CrashTitle = rep.Title
job.resp.CrashReport = rep.Report
- job.resp.CrashLog = output
+ job.resp.CrashLog = rep.Output
return true, nil
}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 8a706bfe4..6fb4cb7cc 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -107,12 +107,9 @@ type Fuzzer struct {
}
type Crash struct {
- vmIndex int
- hub bool // this crash was created based on a repro from hub
- title string
- report []byte
- log []byte
- corrupted bool
+ vmIndex int
+ hub bool // this crash was created based on a repro from hub
+ *report.Report
}
func main() {
@@ -372,7 +369,7 @@ func (mgr *Manager) vmLoop() {
mgr.mu.Unlock()
for crash := range pendingRepro {
- if reproducing[crash.title] {
+ if reproducing[crash.Title] {
continue
}
delete(pendingRepro, crash)
@@ -384,7 +381,7 @@ func (mgr *Manager) vmLoop() {
} else {
cid := &dashapi.CrashID{
BuildID: mgr.cfg.Tag,
- Title: crash.title,
+ Title: crash.Title,
}
needRepro, err := mgr.dash.NeedRepro(cid)
if err != nil {
@@ -395,8 +392,8 @@ func (mgr *Manager) vmLoop() {
}
}
}
- Logf(1, "loop: add to repro queue '%v'", crash.title)
- reproducing[crash.title] = true
+ Logf(1, "loop: add to repro queue '%v'", crash.Title)
+ reproducing[crash.Title] = true
reproQueue = append(reproQueue, crash)
}
@@ -423,10 +420,10 @@ func (mgr *Manager) vmLoop() {
instances = instances[:len(instances)-instancesPerRepro]
reproInstances += instancesPerRepro
atomic.AddUint32(&mgr.numReproducing, 1)
- Logf(1, "loop: starting repro of '%v' on instances %+v", crash.title, vmIndexes)
+ Logf(1, "loop: starting repro of '%v' on instances %+v", crash.Title, vmIndexes)
go func() {
- res, err := repro.Run(crash.log, mgr.cfg, mgr.getReporter(), mgr.vmPool, vmIndexes)
- reproDone <- &ReproResult{vmIndexes, crash.title, res, err, crash.hub}
+ res, err := repro.Run(crash.Output, mgr.cfg, mgr.getReporter(), mgr.vmPool, vmIndexes)
+ reproDone <- &ReproResult{vmIndexes, crash.Title, res, err, crash.hub}
}()
}
for !canRepro() && len(instances) != 0 {
@@ -462,7 +459,7 @@ func (mgr *Manager) vmLoop() {
if shutdown != nil && res.crash != nil && !mgr.isSuppressed(res.crash) {
needRepro := mgr.saveCrash(res.crash)
if needRepro {
- Logf(1, "loop: add pending repro for '%v'", res.crash.title)
+ Logf(1, "loop: add pending repro for '%v'", res.crash.Title)
pendingRepro[res.crash] = true
}
}
@@ -472,7 +469,7 @@ func (mgr *Manager) vmLoop() {
title := ""
if res.res != nil {
crepro = res.res.CRepro
- title = res.res.Title
+ title = res.res.Report.Title
}
Logf(1, "loop: repro on %+v finished '%v', repro=%v crepro=%v desc='%v'",
res.instances, res.title0, res.res != nil, crepro, title)
@@ -545,29 +542,26 @@ func (mgr *Manager) runInstance(index int) (*Crash, error) {
return nil, fmt.Errorf("failed to run fuzzer: %v", err)
}
- rep, output := vm.MonitorExecution(outc, errc, mgr.getReporter(), false)
+ rep := vm.MonitorExecution(outc, errc, mgr.getReporter(), false)
if rep == nil {
// This is the only "OK" outcome.
Logf(0, "vm-%v: running for %v, restarting", index, time.Since(start))
return nil, nil
}
cash := &Crash{
- vmIndex: index,
- hub: false,
- title: rep.Title,
- report: rep.Report,
- corrupted: rep.Corrupted,
- log: output,
+ vmIndex: index,
+ hub: false,
+ Report: rep,
}
return cash, nil
}
func (mgr *Manager) isSuppressed(crash *Crash) bool {
for _, re := range mgr.cfg.ParsedSuppressions {
- if !re.Match(crash.log) {
+ if !re.Match(crash.Output) {
continue
}
- Logf(1, "vm-%v: suppressing '%v' with '%v'", crash.vmIndex, crash.title, re.String())
+ Logf(1, "vm-%v: suppressing '%v' with '%v'", crash.vmIndex, crash.Title, re.String())
mgr.mu.Lock()
mgr.stats["suppressed"]++
mgr.mu.Unlock()
@@ -577,19 +571,19 @@ func (mgr *Manager) isSuppressed(crash *Crash) bool {
}
func (mgr *Manager) saveCrash(crash *Crash) bool {
- Logf(0, "vm-%v: crash: %v", crash.vmIndex, crash.title)
+ Logf(0, "vm-%v: crash: %v", crash.vmIndex, crash.Title)
mgr.mu.Lock()
mgr.stats["crashes"]++
- if !mgr.crashTypes[crash.title] {
- mgr.crashTypes[crash.title] = true
+ if !mgr.crashTypes[crash.Title] {
+ mgr.crashTypes[crash.Title] = true
mgr.stats["crash types"]++
}
mgr.mu.Unlock()
- crash.report = mgr.symbolizeReport(crash.report)
+ crash.Report.Report = mgr.symbolizeReport(crash.Report.Report)
if mgr.dash != nil {
var maintainers []string
- guiltyFile := mgr.getReporter().ExtractGuiltyFile(crash.report)
+ guiltyFile := mgr.getReporter().ExtractGuiltyFile(crash.Report.Report)
if guiltyFile != "" {
var err error
maintainers, err = mgr.getReporter().GetMaintainers(guiltyFile)
@@ -599,11 +593,11 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
}
dc := &dashapi.Crash{
BuildID: mgr.cfg.Tag,
- Title: crash.title,
- Corrupted: crash.corrupted,
+ Title: crash.Title,
+ Corrupted: crash.Corrupted,
Maintainers: maintainers,
- Log: crash.log,
- Report: crash.report,
+ Log: crash.Output,
+ Report: crash.Report.Report,
}
resp, err := mgr.dash.ReportCrash(dc)
if err != nil {
@@ -615,11 +609,11 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
}
}
- sig := hash.Hash([]byte(crash.title))
+ sig := hash.Hash([]byte(crash.Title))
id := sig.String()
dir := filepath.Join(mgr.crashdir, id)
osutil.MkdirAll(dir)
- if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(crash.title+"\n")); err != nil {
+ if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(crash.Title+"\n")); err != nil {
Logf(0, "failed to write crash: %v", err)
}
// Save up to 100 reports. If we already have 100, overwrite the oldest one.
@@ -638,12 +632,12 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
oldestTime = info.ModTime()
}
}
- osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("log%v", oldestI)), crash.log)
+ osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("log%v", oldestI)), crash.Output)
if len(mgr.cfg.Tag) > 0 {
osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("tag%v", oldestI)), []byte(mgr.cfg.Tag))
}
- if len(crash.report) > 0 {
- osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("report%v", oldestI)), crash.report)
+ if len(crash.Report.Report) > 0 {
+ osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("report%v", oldestI)), crash.Report.Report)
}
return mgr.needRepro(crash)
@@ -652,10 +646,10 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
const maxReproAttempts = 3
func (mgr *Manager) needRepro(crash *Crash) bool {
- if !mgr.cfg.Reproduce || crash.corrupted {
+ if !mgr.cfg.Reproduce || crash.Corrupted {
return false
}
- sig := hash.Hash([]byte(crash.title))
+ sig := hash.Hash([]byte(crash.Title))
dir := filepath.Join(mgr.crashdir, sig.String())
if osutil.IsExist(filepath.Join(dir, "repro.prog")) {
return false
@@ -690,11 +684,12 @@ func (mgr *Manager) saveFailedRepro(desc string) {
}
func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
- res.Report = mgr.symbolizeReport(res.Report)
- dir := filepath.Join(mgr.crashdir, hash.String([]byte(res.Title)))
+ rep := res.Report
+ rep.Report = mgr.symbolizeReport(rep.Report)
+ dir := filepath.Join(mgr.crashdir, hash.String([]byte(rep.Title)))
osutil.MkdirAll(dir)
- if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(res.Title+"\n")); err != nil {
+ if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(rep.Title+"\n")); err != nil {
Logf(0, "failed to write crash: %v", err)
}
opts := fmt.Sprintf("# %+v\n", res.Opts)
@@ -703,11 +698,11 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
if len(mgr.cfg.Tag) > 0 {
osutil.WriteFile(filepath.Join(dir, "repro.tag"), []byte(mgr.cfg.Tag))
}
- if len(res.Log) > 0 {
- osutil.WriteFile(filepath.Join(dir, "repro.log"), res.Log)
+ if len(rep.Output) > 0 {
+ osutil.WriteFile(filepath.Join(dir, "repro.log"), rep.Output)
}
- if len(res.Report) > 0 {
- osutil.WriteFile(filepath.Join(dir, "repro.report"), res.Report)
+ if len(rep.Report) > 0 {
+ osutil.WriteFile(filepath.Join(dir, "repro.report"), rep.Report)
}
osutil.WriteFile(filepath.Join(dir, "repro.stats.log"), res.Stats.Log)
stats := fmt.Sprintf("Extracting prog: %s\nMinimizing prog: %s\nSimplifying prog options: %s\nExtracting C: %s\nSimplifying C: %s\n",
@@ -731,7 +726,7 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
// Append this repro to repro list to send to hub if it didn't come from hub originally.
if !hub {
progForHub := []byte(fmt.Sprintf("# %+v\n# %v\n# %v\n%s",
- res.Opts, res.Title, mgr.cfg.Tag, prog))
+ res.Opts, res.Report.Title, mgr.cfg.Tag, prog))
mgr.mu.Lock()
mgr.newRepros = append(mgr.newRepros, progForHub)
mgr.mu.Unlock()
@@ -739,7 +734,7 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
if mgr.dash != nil {
var maintainers []string
- guiltyFile := mgr.getReporter().ExtractGuiltyFile(res.Report)
+ guiltyFile := mgr.getReporter().ExtractGuiltyFile(res.Report.Report)
if guiltyFile != "" {
var err error
maintainers, err = mgr.getReporter().GetMaintainers(guiltyFile)
@@ -749,10 +744,10 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
}
dc := &dashapi.Crash{
BuildID: mgr.cfg.Tag,
- Title: res.Title,
+ Title: res.Report.Title,
Maintainers: maintainers,
- Log: res.Log,
- Report: res.Report,
+ Log: res.Report.Output,
+ Report: res.Report.Report,
ReproOpts: res.Opts.Serialize(),
ReproSyz: res.Prog.Serialize(),
ReproC: cprogText,
@@ -1123,9 +1118,10 @@ func (mgr *Manager) hubSync() {
mgr.hubReproQueue <- &Crash{
vmIndex: -1,
hub: true,
- title: "external repro",
- report: nil,
- log: repro,
+ Report: &report.Report{
+ Title: "external repro",
+ Output: repro,
+ },
}
}
diff --git a/tools/syz-crush/crush.go b/tools/syz-crush/crush.go
index 1d858790b..ca92f8cd1 100644
--- a/tools/syz-crush/crush.go
+++ b/tools/syz-crush/crush.go
@@ -109,7 +109,7 @@ func runInstance(cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Poo
}
log.Logf(0, "vm-%v: crushing...", index)
- rep, output := vm.MonitorExecution(outc, errc, reporter, false)
+ rep := vm.MonitorExecution(outc, errc, reporter, false)
if rep == nil {
// This is the only "OK" outcome.
log.Logf(0, "vm-%v: running long enough, restarting", index)
@@ -121,7 +121,7 @@ func runInstance(cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Poo
}
defer f.Close()
log.Logf(0, "vm-%v: crashed: %v, saving to %v", index, rep.Title, f.Name())
- f.Write(output)
+ f.Write(rep.Output)
}
return
}
diff --git a/vm/vm.go b/vm/vm.go
index 24a5d55ae..f27185742 100644
--- a/vm/vm.go
+++ b/vm/vm.go
@@ -100,10 +100,10 @@ func (inst *Instance) Close() {
// It detects kernel oopses in output, lost connections, hangs, etc.
// outc/errc is what vm.Instance.Run returns, reporter parses kernel output for oopses.
// If canExit is false and the program exits, it is treated as an error.
-// Returns crash report and raw output around the crash, or nil if no error happens.
+// Returns crash report, or nil if no error happens.
func MonitorExecution(outc <-chan []byte, errc <-chan error, reporter report.Reporter, canExit bool) (
- rep *report.Report, output []byte) {
- //title string, report, output []byte, crashed, timedout bool) {
+ rep *report.Report) {
+ var output []byte
waitForOutput := func() {
timer := time.NewTimer(10 * time.Second).C
for {
@@ -124,23 +124,24 @@ func MonitorExecution(outc <-chan []byte, errc <-chan error, reporter report.Rep
beforeContext = 1024 << 10
afterContext = 128 << 10
)
- extractError := func(defaultError string) (*report.Report, []byte) {
+ extractError := func(defaultError string) *report.Report {
// Give it some time to finish writing the error message.
waitForOutput()
if bytes.Contains(output, []byte("SYZ-FUZZER: PREEMPTED")) {
- return nil, nil
+ return nil
}
if !reporter.ContainsCrash(output[matchPos:]) {
if defaultError == "" {
if canExit {
- return nil, nil
+ return nil
}
defaultError = "lost connection to test machine"
}
rep := &report.Report{
- Title: defaultError,
+ Title: defaultError,
+ Output: output,
}
- return rep, output
+ return rep
}
rep := reporter.Parse(output[matchPos:])
if rep == nil {
@@ -154,7 +155,8 @@ func MonitorExecution(outc <-chan []byte, errc <-chan error, reporter report.Rep
if end > len(output) {
end = len(output)
}
- return rep, output[start:end]
+ rep.Output = output[start:end]
+ return rep
}
lastExecuteTime := time.Now()
@@ -174,7 +176,7 @@ func MonitorExecution(outc <-chan []byte, errc <-chan error, reporter report.Rep
// but wait for kernel output in case there is some delayed oops.
return extractError("")
case TimeoutErr:
- return nil, nil
+ return nil
default:
// Note: connection lost can race with a kernel oops message.
// In such case we want to return the kernel oops.
@@ -207,18 +209,20 @@ func MonitorExecution(outc <-chan []byte, errc <-chan error, reporter report.Rep
// because frequently it's the same condition.
if time.Since(lastExecuteTime) > 3*time.Minute {
rep := &report.Report{
- Title: "no output from test machine",
+ Title: "no output from test machine",
+ Output: output,
}
- return rep, output
+ return rep
}
case <-ticker.C:
tickerFired = true
rep := &report.Report{
- Title: "no output from test machine",
+ Title: "no output from test machine",
+ Output: output,
}
- return rep, output
+ return rep
case <-Shutdown:
- return nil, nil
+ return nil
}
}
}