aboutsummaryrefslogtreecommitdiffstats
path: root/syz-manager
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2024-07-16 18:12:12 +0200
committerAleksandr Nogikh <nogikh@google.com>2024-07-17 09:50:01 +0000
commit3e908871f2a4ad7e80057fb82b2fc7cb34f74dcb (patch)
tree14e80403db7ce53772863cc6f7517e483e18d6ec /syz-manager
parent3bdf1c5586d449bb17aa0373b45b5b3bbeedda46 (diff)
syz-manager: tidy ReproResult
There has been some mess and duplication around the Crash/ReproResult data structures. As a result, we've been attempting to upload repro failure logs to the dashboard for bugs which did not originate from the dashboard. It litters the syz-manager logs. Refactor the code.
Diffstat (limited to 'syz-manager')
-rw-r--r--syz-manager/hub.go3
-rw-r--r--syz-manager/manager.go59
-rw-r--r--syz-manager/repro.go19
-rw-r--r--syz-manager/repro_test.go4
4 files changed, 42 insertions, 43 deletions
diff --git a/syz-manager/hub.go b/syz-manager/hub.go
index ba3a14ad3..34d061a83 100644
--- a/syz-manager/hub.go
+++ b/syz-manager/hub.go
@@ -7,7 +7,6 @@ import (
"fmt"
"net/http"
"strings"
- "sync/atomic"
"time"
"github.com/google/syzkaller/pkg/auth"
@@ -79,7 +78,6 @@ type HubConnector struct {
hubReproQueue chan *Crash
needMoreRepros func() bool
keyGet keyGetter
- reproSeq atomic.Int64
statSendProgAdd *stats.Val
statSendProgDel *stats.Val
@@ -309,7 +307,6 @@ func (hc *HubConnector) processRepros(repros [][]byte) int {
hc.hubReproQueue <- &Crash{
fromHub: true,
Report: &report.Report{
- Title: fmt.Sprintf("external repro #%d", hc.reproSeq.Add(1)),
Type: typ,
Output: repro,
},
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index dbd80c063..2614070cf 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -163,6 +163,19 @@ type Crash struct {
*report.Report
}
+func (c *Crash) FullTitle() string {
+ if c.Report.Title != "" {
+ return c.Report.Title
+ }
+ // Just use some unique, but stable titles.
+ if c.fromDashboard {
+ return fmt.Sprintf("dashboard crash %p", c)
+ } else if c.fromHub {
+ return fmt.Sprintf("crash from hub %p", c)
+ }
+ panic("the crash is expected to have a report")
+}
+
func main() {
if prog.GitRevision == "" {
log.Fatalf("bad syz-manager build: build with make, run bin/syz-manager")
@@ -369,14 +382,11 @@ func (mgr *Manager) writeBench() {
}
type ReproResult struct {
- report0 *report.Report // the original report we started reproducing
- repro *repro.Result
- strace *repro.StraceResult
- stats *repro.Stats
- err error
- fromHub bool
- fromDashboard bool
- originalTitle string // crash title before we started bug reproduction
+ crash *Crash // the original crash
+ repro *repro.Result
+ strace *repro.StraceResult
+ stats *repro.Stats
+ err error
}
func (mgr *Manager) processFuzzingResults(ctx context.Context) {
@@ -399,12 +409,12 @@ func (mgr *Manager) processFuzzingResults(ctx context.Context) {
reportReproError(res.err)
}
if res.repro == nil {
- if res.fromHub {
- log.Logf(1, "repro '%v' came from syz-hub, not reporting the failure",
- res.report0.Title)
+ if res.crash.Title == "" {
+ log.Logf(1, "repro '%v' not from dashboard, so not reporting the failure",
+ res.crash.FullTitle())
} else {
- log.Logf(1, "report repro failure of '%v'", res.report0.Title)
- mgr.saveFailedRepro(res.report0, res.stats)
+ log.Logf(1, "report repro failure of '%v'", res.crash.Title)
+ mgr.saveFailedRepro(res.crash.Report, res.stats)
}
} else {
mgr.saveRepro(res)
@@ -465,13 +475,10 @@ func reportReproError(err error) {
func (mgr *Manager) runRepro(crash *Crash) *ReproResult {
res, stats, err := repro.Run(crash.Output, mgr.cfg, mgr.enabledFeatures, mgr.reporter, mgr.pool)
ret := &ReproResult{
- report0: crash.Report,
- repro: res,
- stats: stats,
- err: err,
- fromHub: crash.fromHub,
- fromDashboard: crash.fromDashboard,
- originalTitle: crash.Title,
+ crash: crash,
+ repro: res,
+ stats: stats,
+ err: err,
}
if err == nil && res != nil && mgr.cfg.StraceBin != "" {
const straceAttempts = 2
@@ -1010,7 +1017,7 @@ func (mgr *Manager) saveRepro(res *ReproResult) {
progText := repro.Prog.Serialize()
// Append this repro to repro list to send to hub if it didn't come from hub originally.
- if !res.fromHub {
+ if !res.crash.fromHub {
progForHub := []byte(fmt.Sprintf("# %+v\n# %v\n# %v\n%s",
repro.Opts, repro.Report.Title, mgr.cfg.Tag, progText))
mgr.mu.Lock()
@@ -1065,7 +1072,7 @@ func (mgr *Manager) saveRepro(res *ReproResult) {
ReproC: cprogText,
ReproLog: truncateReproLog(fullReproLog(res.stats)),
Assets: mgr.uploadReproAssets(repro),
- OriginalTitle: res.originalTitle,
+ OriginalTitle: res.crash.Title,
}
setGuiltyFiles(dc, report)
if _, err := mgr.dash.ReportCrash(dc); err != nil {
@@ -1603,7 +1610,6 @@ func (mgr *Manager) dashboardReporter() {
}
func (mgr *Manager) dashboardReproTasks() {
- seq := 0
for range time.NewTicker(20 * time.Minute).C {
if !mgr.reproMgr.CanReproMore() {
// We don't need reproducers at the moment.
@@ -1615,15 +1621,10 @@ func (mgr *Manager) dashboardReproTasks() {
continue
}
if len(resp.CrashLog) > 0 {
- title := resp.Title
- if title == "" {
- seq++
- title = fmt.Sprintf("repro #%d from the dashboard", seq)
- }
mgr.externalReproQueue <- &Crash{
fromDashboard: true,
Report: &report.Report{
- Title: title,
+ Title: resp.Title,
Output: resp.CrashLog,
},
}
diff --git a/syz-manager/repro.go b/syz-manager/repro.go
index 8953e3b64..8a4e75c27 100644
--- a/syz-manager/repro.go
+++ b/syz-manager/repro.go
@@ -104,15 +104,16 @@ func (m *reproManager) Enqueue(crash *Crash) {
m.mu.Lock()
defer m.mu.Unlock()
- if m.onlyOnce && m.attempted[crash.Title] {
+ title := crash.FullTitle()
+ if m.onlyOnce && m.attempted[title] {
// Try to reproduce each bug at most 1 time in this mode.
// Since we don't upload bugs/repros to dashboard, it likely won't have
// the reproducer even if we succeeded last time, and will repeatedly
// say it needs a repro.
return
}
- log.Logf(1, "scheduled a reproduction of '%v'", crash.Title)
- m.attempted[crash.Title] = true
+ log.Logf(1, "scheduled a reproduction of '%v'", title)
+ m.attempted[title] = true
m.queue = append(m.queue, crash)
// Ping the loop.
@@ -127,7 +128,7 @@ func (m *reproManager) popCrash() *Crash {
defer m.mu.Unlock()
for i, crash := range m.queue {
- if m.reproducing[crash.Title] {
+ if m.reproducing[crash.FullTitle()] {
continue
}
m.queue = slices.Delete(m.queue, i, i+1)
@@ -162,7 +163,7 @@ func (m *reproManager) Loop(ctx context.Context) {
}
m.mu.Lock()
- m.reproducing[crash.Title] = true
+ m.reproducing[crash.FullTitle()] = true
m.adjustPoolSizeLocked()
m.mu.Unlock()
@@ -173,7 +174,7 @@ func (m *reproManager) Loop(ctx context.Context) {
m.handle(crash)
m.mu.Lock()
- delete(m.reproducing, crash.Title)
+ delete(m.reproducing, crash.FullTitle())
m.adjustPoolSizeLocked()
m.mu.Unlock()
@@ -184,7 +185,7 @@ func (m *reproManager) Loop(ctx context.Context) {
}
func (m *reproManager) handle(crash *Crash) {
- log.Logf(0, "start reproducing '%v'", crash.Title)
+ log.Logf(0, "start reproducing '%v'", crash.FullTitle())
res := m.mgr.runRepro(crash)
@@ -195,7 +196,7 @@ func (m *reproManager) handle(crash *Crash) {
title = res.repro.Report.Title
}
log.Logf(0, "repro finished '%v', repro=%v crepro=%v desc='%v' hub=%v from_dashboard=%v",
- res.report0.Title, res.repro != nil, crepro, title, res.fromHub, res.fromDashboard,
+ crash.FullTitle(), res.repro != nil, crepro, title, crash.fromHub, crash.fromDashboard,
)
m.Done <- res
}
@@ -206,7 +207,7 @@ func (m *reproManager) adjustPoolSizeLocked() {
// We process same-titled crashes sequentially, so only count unique ones.
uniqueTitles := maps.Clone(m.reproducing)
for _, crash := range m.queue {
- uniqueTitles[crash.Title] = true
+ uniqueTitles[crash.FullTitle()] = true
}
needRepros := len(uniqueTitles)
diff --git a/syz-manager/repro_test.go b/syz-manager/repro_test.go
index 61c6dc4b4..79b482d36 100644
--- a/syz-manager/repro_test.go
+++ b/syz-manager/repro_test.go
@@ -53,8 +53,8 @@ func TestReproManager(t *testing.T) {
assert.EqualValues(t, 3, mock.reserved.Load())
// Pretend that reproducers have finished.
- called.ret <- &ReproResult{report0: &report.Report{}}
- called2.ret <- &ReproResult{report0: &report.Report{}}
+ called.ret <- &ReproResult{crash: &Crash{fromHub: true}}
+ called2.ret <- &ReproResult{crash: &Crash{fromHub: true}}
// Wait until the number of reserved VMs goes to 0.
for i := 0; i < 100; i++ {