diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2026-01-25 21:01:16 +0000 |
|---|---|---|
| committer | Aleksandr Nogikh <nogikh@google.com> | 2026-01-26 15:43:37 +0000 |
| commit | 30c4bb8427fe0d12bfff0fea81bf31d04910129e (patch) | |
| tree | 6ff01bffdf8f23a3e76a1218a4b4af235e137f29 /pkg/manager/diff/manager.go | |
| parent | dd170fcec9a15df2760e4cd84d22aab51ef0d172 (diff) | |
pkg/manager/diff: add tests
Refactor the package to support testing and add
a number of basic tests.
Diffstat (limited to 'pkg/manager/diff/manager.go')
| -rw-r--r-- | pkg/manager/diff/manager.go | 71 |
1 file changed, 47 insertions, 24 deletions
diff --git a/pkg/manager/diff/manager.go b/pkg/manager/diff/manager.go index cb19add63..ca6968ea1 100644 --- a/pkg/manager/diff/manager.go +++ b/pkg/manager/diff/manager.go @@ -13,6 +13,7 @@ import ( "sync" "time" + "github.com/google/syzkaller/pkg/flatrpc" "github.com/google/syzkaller/pkg/fuzzer/queue" "github.com/google/syzkaller/pkg/log" "github.com/google/syzkaller/pkg/manager" @@ -44,6 +45,9 @@ type Config struct { // the particular base kernel has already been seen to crash with the given title. // It helps reduce the number of unnecessary reproductions. IgnoreCrash func(context.Context, string) (bool, error) + + runner runner + runRepro func(context.Context, []byte, repro.Environment) (*repro.Result, *repro.Stats, error) } func (cfg *Config) TriageDeadline() <-chan time.Time { @@ -88,6 +92,13 @@ func Run(ctx context.Context, baseCfg, newCfg *mgrconfig.Config, cfg Config) err base.source = stream new.duplicateInto = stream + if cfg.runRepro == nil { + cfg.runRepro = repro.Run + } + if cfg.runner == nil { + cfg.runner = &reproRunner{done: make(chan reproRunnerResult, 2)} + } + diffCtx := &diffContext{ cfg: cfg, doneRepro: make(chan *manager.ReproResult), @@ -115,14 +126,26 @@ func Run(ctx context.Context, baseCfg, newCfg *mgrconfig.Config, cfg Config) err return eg.Wait() } +type Kernel interface { + Loop(ctx context.Context) error + Crashes() <-chan *report.Report + TriageProgress() float64 + ProgsPerArea() map[string]int + CoverFilters() manager.CoverageFilters + Config() *mgrconfig.Config + Pool() *vm.Dispatcher + Features() flatrpc.Feature + Reporter() *report.Reporter +} + type diffContext struct { cfg Config store *manager.DiffFuzzerStore http *manager.HTTPServer doneRepro chan *manager.ReproResult - base *kernelContext - new *kernelContext + base Kernel + new Kernel patchedOnly chan *Bug mu sync.Mutex @@ -138,7 +161,7 @@ const ( func (dc *diffContext) Loop(ctx context.Context) error { g, groupCtx := errgroup.WithContext(ctx) - reproLoop := 
manager.NewReproLoop(dc, dc.new.pool.Total()-dc.new.cfg.FuzzingVMs, false) + reproLoop := manager.NewReproLoop(dc, dc.new.Pool().Total()-dc.new.Config().FuzzingVMs, false) if dc.http != nil { dc.http.ReproLoop = reproLoop g.Go(func() error { @@ -163,7 +186,7 @@ func (dc *diffContext) Loop(ctx context.Context) error { g.Go(func() error { return dc.base.Loop(groupCtx) }) g.Go(func() error { return dc.new.Loop(groupCtx) }) - runner := &reproRunner{done: make(chan reproRunnerResult, 2), kernel: dc.base} + runner := dc.cfg.runner statTimer := time.NewTicker(5 * time.Minute) loop: for { @@ -177,10 +200,10 @@ loop: } data, _ := json.MarshalIndent(vals, "", " ") log.Logf(0, "STAT %s", data) - case rep := <-dc.base.crashes: + case rep := <-dc.base.Crashes(): log.Logf(1, "base crash: %v", rep.Title) dc.reportBaseCrash(groupCtx, rep) - case ret := <-runner.done: + case ret := <-runner.Results(): dc.handleReproResult(groupCtx, ret, reproLoop) case ret := <-dc.doneRepro: // We have finished reproducing a crash from the patched instance. @@ -195,7 +218,7 @@ loop: dc.store.UpdateStatus(ret.Repro.Report.Title, manager.DiffBugStatusVerifying) g.Go(func() error { - runner.Run(groupCtx, ret.Repro, ret.Crash.FullRepro) + runner.Run(groupCtx, dc.base, ret.Repro, ret.Crash.FullRepro) return nil }) } else { @@ -204,7 +227,7 @@ loop: dc.store.UpdateStatus(origTitle, manager.DiffBugStatusCompleted) } dc.store.SaveRepro(ret) - case rep := <-dc.new.crashes: + case rep := <-dc.new.Crashes(): // A new crash is found on the patched instance. crash := &manager.Crash{Report: rep} need := dc.NeedRepro(crash) @@ -241,9 +264,9 @@ func (dc *diffContext) handleReproResult(ctx context.Context, ret reproRunnerRes Repro: ret.repro, }: } - log.Logf(0, "patched-only: %s", ret.reproReport.Title) // Now that we know this bug only affects the patch kernel, we can spend more time // generating a minimalistic repro and a C repro. 
+ log.Logf(0, "patched-only: %s", ret.reproReport.Title) if !ret.fullRepro { reproLoop.Enqueue(&manager.Crash{ Report: &report.Report{ @@ -290,21 +313,21 @@ func (dc *diffContext) reportBaseCrash(ctx context.Context, rep *report.Report) } func (dc *diffContext) waitCorpusTriage(ctx context.Context, threshold float64) chan struct{} { - const backOffTime = 30 * time.Second + const triageCheckPeriod = 30 * time.Second ret := make(chan struct{}) go func() { for { - select { - case <-time.After(backOffTime): - case <-ctx.Done(): - return - } - triaged := dc.new.triageProgress() + triaged := dc.new.TriageProgress() if triaged >= threshold { log.Logf(0, "triaged %.1f%% of the corpus", triaged*100.0) close(ret) return } + select { + case <-time.After(triageCheckPeriod): + case <-ctx.Done(): + return + } } }() return ret @@ -328,7 +351,7 @@ func (dc *diffContext) monitorPatchedCoverage(ctx context.Context) error { // By this moment, we must have coverage filters already filled out. focusPCs := 0 // The last one is "everything else", so it's not of interest. 
- coverFilters := dc.new.coverFilters + coverFilters := dc.new.CoverFilters() for i := 0; i < len(coverFilters.Areas)-1; i++ { focusPCs += len(coverFilters.Areas[i].CoverPCs) } @@ -344,7 +367,7 @@ func (dc *diffContext) monitorPatchedCoverage(ctx context.Context) error { case <-ctx.Done(): return nil } - focusAreaStats := dc.new.progsPerArea() + focusAreaStats := dc.new.ProgsPerArea() if focusAreaStats[symbolsArea]+focusAreaStats[filesArea]+focusAreaStats[includesArea] > 0 { log.Logf(0, "fuzzer has reached the modified code (%d + %d + %d), continuing fuzzing", focusAreaStats[symbolsArea], focusAreaStats[filesArea], focusAreaStats[includesArea]) @@ -391,11 +414,11 @@ func (dc *diffContext) RunRepro(ctx context.Context, crash *manager.Crash) *mana dc.reproAttempts[crash.Title]++ dc.mu.Unlock() - res, stats, err := repro.Run(ctx, crash.Output, repro.Environment{ - Config: dc.new.cfg, - Features: dc.new.features, - Reporter: dc.new.reporter, - Pool: dc.new.pool, + res, stats, err := dc.cfg.runRepro(ctx, crash.Output, repro.Environment{ + Config: dc.new.Config(), + Features: dc.new.Features(), + Reporter: dc.new.Reporter(), + Pool: dc.new.Pool(), Fast: !crash.FullRepro, }) if res != nil && res.Report != nil { @@ -419,5 +442,5 @@ func (dc *diffContext) RunRepro(ctx context.Context, crash *manager.Crash) *mana } func (dc *diffContext) ResizeReproPool(size int) { - dc.new.pool.ReserveForRun(size) + dc.new.Pool().ReserveForRun(size) } |
