author    Taras Madan <tarasmadan@google.com>  2026-01-21 20:00:38 +0100
committer Taras Madan <tarasmadan@google.com>  2026-01-21 22:46:06 +0000
commit    a16aed1db22739e7dea8098c79bc1963b871b5ec (patch)
tree      ac3968a697f9cbe78c66915abe2a8d00e58a787b /pkg
parent    7ee46ad36cdaae818f74dea493b5ec30df3fe31b (diff)
all: fix context.Context usage
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/aflow/execute.go            |  32
-rw-r--r--  pkg/cover/file.go               |   6
-rw-r--r--  pkg/covermerger/covermerger.go  |  14
-rw-r--r--  pkg/flatrpc/conn.go             |   8
-rw-r--r--  pkg/manager/diff.go             |  42
-rw-r--r--  pkg/rpcserver/local.go          |  38
-rw-r--r--  pkg/rpcserver/rpcserver.go      |   4
-rw-r--r--  pkg/runtest/run.go              | 118
8 files changed, 131 insertions(+), 131 deletions(-)
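
In short, the diff below applies the usual Go naming conventions for context.Context: the context is the first parameter and is called ctx, contexts derived via errgroup.WithContext get a distinct name such as groupCtx rather than shadowing the caller's ctx, and receivers previously named ctx are renamed so that identifier stays available for the real context. A minimal sketch of the convention, using hypothetical names (worker, Run) that are not part of this change:

package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// worker is a hypothetical type used only to illustrate the naming rules.
type worker struct{}

// Run takes the context as the first parameter, named ctx; the receiver (w)
// no longer occupies that name.
func (w *worker) Run(ctx context.Context) error {
	// A derived context gets its own name (groupCtx) so the parent ctx
	// stays visible and is not accidentally shadowed.
	eg, groupCtx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		<-groupCtx.Done() // cancelled when ctx is cancelled or another goroutine fails
		return nil
	})
	return eg.Wait()
}
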
diff --git a/pkg/aflow/execute.go b/pkg/aflow/execute.go
index 3e1a6a112..405498800 100644
--- a/pkg/aflow/execute.go
+++ b/pkg/aflow/execute.go
@@ -25,46 +25,46 @@ import (
// The workdir argument should point to a dir owned by aflow to store private data,
// it can be shared across parallel executions in the same process, and preferably
// preserved across process restarts for caching purposes.
-func (flow *Flow) Execute(c context.Context, model, workdir string, inputs map[string]any,
+func (flow *Flow) Execute(ctx context.Context, model, workdir string, inputs map[string]any,
cache *Cache, onEvent onEvent) (map[string]any, error) {
if err := flow.checkInputs(inputs); err != nil {
return nil, fmt.Errorf("flow inputs are missing: %w", err)
}
- ctx := &Context{
- Context: c,
+ c := &Context{
+ Context: ctx,
Workdir: osutil.Abs(workdir),
llmModel: model,
cache: cache,
state: maps.Clone(inputs),
onEvent: onEvent,
}
- defer ctx.close()
- if s := c.Value(stubContextKey); s != nil {
- ctx.stubContext = *s.(*stubContext)
+ defer c.close()
+ if s := ctx.Value(stubContextKey); s != nil {
+ c.stubContext = *s.(*stubContext)
}
- if ctx.timeNow == nil {
- ctx.timeNow = time.Now
+ if c.timeNow == nil {
+ c.timeNow = time.Now
}
- if ctx.generateContent == nil {
- ctx.generateContent = ctx.generateContentGemini
+ if c.generateContent == nil {
+ c.generateContent = c.generateContentGemini
}
span := &trajectory.Span{
Type: trajectory.SpanFlow,
Name: flow.Name,
}
- if err := ctx.startSpan(span); err != nil {
+ if err := c.startSpan(span); err != nil {
return nil, err
}
- flowErr := flow.Root.execute(ctx)
+ flowErr := flow.Root.execute(c)
if flowErr == nil {
- span.Results = flow.extractOutputs(ctx.state)
+ span.Results = flow.extractOutputs(c.state)
}
- if err := ctx.finishSpan(span, flowErr); err != nil {
+ if err := c.finishSpan(span, flowErr); err != nil {
return nil, err
}
- if ctx.spanNesting != 0 {
+ if c.spanNesting != 0 {
// Since we finish all spans, even on errors, we should end up at 0.
- panic(fmt.Sprintf("unbalanced spans (%v)", ctx.spanNesting))
+ panic(fmt.Sprintf("unbalanced spans (%v)", c.spanNesting))
}
return span.Results, nil
}
diff --git a/pkg/cover/file.go b/pkg/cover/file.go
index 97b6be6b7..6c54a32ed 100644
--- a/pkg/cover/file.go
+++ b/pkg/cover/file.go
@@ -51,7 +51,7 @@ func RendFileCoverage(repo, forCommit, filePath string, fileProvider covermerger
}
// nolint:revive
-func GetMergeResult(c context.Context, ns, repo, forCommit, sourceCommit, filePath string,
+func GetMergeResult(ctx context.Context, ns, repo, forCommit, sourceCommit, filePath string,
proxy covermerger.FuncProxyURI, tp coveragedb.TimePeriod) (*covermerger.MergeResult, error) {
config := &covermerger.Config{
Jobs: 1,
@@ -63,14 +63,14 @@ func GetMergeResult(c context.Context, ns, repo, forCommit, sourceCommit, filePa
}
fromDate, toDate := tp.DatesFromTo()
- csvReader, err := covermerger.InitNsRecords(c, ns, filePath, sourceCommit, fromDate, toDate)
+ csvReader, err := covermerger.InitNsRecords(ctx, ns, filePath, sourceCommit, fromDate, toDate)
if err != nil {
return nil, fmt.Errorf("failed to covermerger.InitNsRecords: %w", err)
}
defer csvReader.Close()
ch := make(chan *covermerger.FileMergeResult, 1)
- if err := covermerger.MergeCSVData(c, config, csvReader, ch); err != nil {
+ if err := covermerger.MergeCSVData(ctx, config, csvReader, ch); err != nil {
return nil, fmt.Errorf("error merging coverage: %w", err)
}
diff --git a/pkg/covermerger/covermerger.go b/pkg/covermerger/covermerger.go
index 82ebe1766..cd279e319 100644
--- a/pkg/covermerger/covermerger.go
+++ b/pkg/covermerger/covermerger.go
@@ -263,7 +263,7 @@ type FileMergeResult struct {
*MergeResult
}
-func MergeCSVData(c context.Context, config *Config, reader io.Reader, results chan<- *FileMergeResult) error {
+func MergeCSVData(ctx context.Context, config *Config, reader io.Reader, results chan<- *FileMergeResult) error {
var schema []string
csvReader := csv.NewReader(reader)
if fields, err := csvReader.Read(); err != nil {
@@ -295,13 +295,13 @@ func MergeCSVData(c context.Context, config *Config, reader io.Reader, results c
return
}
select {
- case <-c.Done():
+ case <-ctx.Done():
return
case recordsChan <- record:
}
}
}()
- errMerging := mergeChanData(c, config, recordsChan, results)
+ errMerging := mergeChanData(ctx, config, recordsChan, results)
errStream := <-errStreamChan
if errMerging != nil || errStream != nil {
return fmt.Errorf("errors merging stream data:\nmerger err: %w\nstream reader err: %w",
@@ -315,10 +315,10 @@ type FileRecords struct {
records []*FileRecord
}
-func mergeChanData(c context.Context, cfg *Config, recordChan <-chan *FileRecord, results chan<- *FileMergeResult,
+func mergeChanData(ctx context.Context, cfg *Config, recordChan <-chan *FileRecord, results chan<- *FileMergeResult,
) error {
g := errgroup.Group{}
- frecordChan := groupFileRecords(recordChan, c)
+ frecordChan := groupFileRecords(ctx, recordChan)
for i := 0; i < cfg.Jobs; i++ {
g.Go(func() error {
@@ -328,7 +328,7 @@ func mergeChanData(c context.Context, cfg *Config, recordChan <-chan *FileRecord
return fmt.Errorf("failed to batchFileData(%s): %w", frecord.fileName, err)
}
select {
- case <-c.Done():
+ case <-ctx.Done():
return nil
case results <- &FileMergeResult{
FilePath: frecord.fileName,
@@ -341,7 +341,7 @@ func mergeChanData(c context.Context, cfg *Config, recordChan <-chan *FileRecord
return g.Wait()
}
-func groupFileRecords(recordChan <-chan *FileRecord, ctx context.Context) chan FileRecords {
+func groupFileRecords(ctx context.Context, recordChan <-chan *FileRecord) chan FileRecords {
frecordChan := make(chan FileRecords)
go func() {
defer close(frecordChan)
diff --git a/pkg/flatrpc/conn.go b/pkg/flatrpc/conn.go
index 47b265493..e8ad1461f 100644
--- a/pkg/flatrpc/conn.go
+++ b/pkg/flatrpc/conn.go
@@ -45,11 +45,11 @@ func Listen(addr string) (*Serv, error) {
// Serve accepts incoming connections and calls handler for each of them.
// An error returned from the handler stops the server and aborts the whole processing.
-func (s *Serv) Serve(baseCtx context.Context, handler func(context.Context, *Conn) error) error {
- eg, ctx := errgroup.WithContext(baseCtx)
+func (s *Serv) Serve(ctx context.Context, handler func(context.Context, *Conn) error) error {
+ eg, groupCtx := errgroup.WithContext(ctx)
go func() {
// If the context is cancelled, stop the server.
- <-ctx.Done()
+ <-groupCtx.Done()
s.Close()
}()
for {
@@ -66,7 +66,7 @@ func (s *Serv) Serve(baseCtx context.Context, handler func(context.Context, *Con
continue
}
eg.Go(func() error {
- connCtx, cancel := context.WithCancel(ctx)
+ connCtx, cancel := context.WithCancel(groupCtx)
defer cancel()
c := NewConn(conn)
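
As context for the conn.go hunk above, this is roughly the shape Serve takes after the rename: a sketch with a hypothetical serveSketch helper (the real method also tolerates temporary accept errors). The errgroup-derived groupCtx propagates handler failures and lets a cancelled parent ctx shut the listener down, while each connection runs under its own child context.

package example

import (
	"context"
	"net"

	"golang.org/x/sync/errgroup"
)

// serveSketch mirrors the structure of Serve with hypothetical names.
func serveSketch(ctx context.Context, ln net.Listener,
	handler func(context.Context, net.Conn) error) error {
	eg, groupCtx := errgroup.WithContext(ctx)
	go func() {
		// Cancellation of ctx (or a handler error) unblocks Accept by closing the listener.
		<-groupCtx.Done()
		ln.Close()
	}()
	for {
		conn, err := ln.Accept()
		if err != nil {
			break // listener closed: cancellation or a fatal accept error
		}
		eg.Go(func() error {
			// Each connection gets a context derived from groupCtx.
			connCtx, cancel := context.WithCancel(groupCtx)
			defer cancel()
			defer conn.Close()
			return handler(connCtx, conn)
		})
	}
	return eg.Wait()
}
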
diff --git a/pkg/manager/diff.go b/pkg/manager/diff.go
index f972748d3..df2724206 100644
--- a/pkg/manager/diff.go
+++ b/pkg/manager/diff.go
@@ -150,39 +150,39 @@ const (
corpusTriageToMonitor = 0.99
)
-func (dc *diffContext) Loop(baseCtx context.Context) error {
- g, ctx := errgroup.WithContext(baseCtx)
+func (dc *diffContext) Loop(ctx context.Context) error {
+ g, groupCtx := errgroup.WithContext(ctx)
reproLoop := NewReproLoop(dc, dc.new.pool.Total()-dc.new.cfg.FuzzingVMs, false)
if dc.http != nil {
dc.http.ReproLoop = reproLoop
g.Go(func() error {
- return dc.http.Serve(ctx)
+ return dc.http.Serve(groupCtx)
})
}
g.Go(func() error {
select {
- case <-ctx.Done():
+ case <-groupCtx.Done():
return nil
- case <-dc.waitCorpusTriage(ctx, corpusTriageToRepro):
+ case <-dc.waitCorpusTriage(groupCtx, corpusTriageToRepro):
case <-dc.cfg.TriageDeadline():
log.Logf(0, "timed out waiting for coprus triage")
}
log.Logf(0, "starting bug reproductions")
- reproLoop.Loop(ctx)
+ reproLoop.Loop(groupCtx)
return nil
})
- g.Go(func() error { return dc.monitorPatchedCoverage(ctx) })
- g.Go(func() error { return dc.base.Loop(ctx) })
- g.Go(func() error { return dc.new.Loop(ctx) })
+ g.Go(func() error { return dc.monitorPatchedCoverage(groupCtx) })
+ g.Go(func() error { return dc.base.Loop(groupCtx) })
+ g.Go(func() error { return dc.new.Loop(groupCtx) })
runner := &reproRunner{done: make(chan reproRunnerResult, 2), kernel: dc.base}
statTimer := time.NewTicker(5 * time.Minute)
loop:
for {
select {
- case <-ctx.Done():
+ case <-groupCtx.Done():
break loop
case <-statTimer.C:
vals := make(map[string]int)
@@ -193,13 +193,13 @@ loop:
log.Logf(0, "STAT %s", data)
case rep := <-dc.base.crashes:
log.Logf(1, "base crash: %v", rep.Title)
- dc.reportBaseCrash(ctx, rep)
+ dc.reportBaseCrash(groupCtx, rep)
case ret := <-runner.done:
// We have run the reproducer on the base instance.
// A sanity check: the base kernel might have crashed with the same title
// since the moment we have stared the reproduction / running on the repro base.
- ignored := dc.ignoreCrash(ctx, ret.reproReport.Title)
+ ignored := dc.ignoreCrash(groupCtx, ret.reproReport.Title)
if ret.crashReport == nil && ignored {
// Report it as error so that we could at least find it in the logs.
log.Errorf("resulting crash of an approved repro result is to be ignored: %s",
@@ -207,7 +207,7 @@ loop:
} else if ret.crashReport == nil {
dc.store.BaseNotCrashed(ret.reproReport.Title)
select {
- case <-ctx.Done():
+ case <-groupCtx.Done():
case dc.patchedOnly <- &UniqueBug{
Report: ret.reproReport,
Repro: ret.repro,
@@ -226,7 +226,7 @@ loop:
})
}
} else {
- dc.reportBaseCrash(ctx, ret.crashReport)
+ dc.reportBaseCrash(groupCtx, ret.crashReport)
log.Logf(0, "crashes both: %s / %s", ret.reproReport.Title, ret.crashReport.Title)
}
case ret := <-dc.doneRepro:
@@ -239,7 +239,7 @@ loop:
log.Logf(1, "found repro for %q (orig title: %q, reliability: %2.f), took %.2f minutes",
ret.Repro.Report.Title, origTitle, ret.Repro.Reliability, ret.Stats.TotalTime.Minutes())
g.Go(func() error {
- runner.Run(ctx, ret.Repro, ret.Crash.FullRepro)
+ runner.Run(groupCtx, ret.Repro, ret.Crash.FullRepro)
return nil
})
} else {
@@ -486,27 +486,27 @@ func setup(name string, cfg *mgrconfig.Config, debug bool) (*kernelContext, erro
return kernelCtx, nil
}
-func (kc *kernelContext) Loop(baseCtx context.Context) error {
+func (kc *kernelContext) Loop(ctx context.Context) error {
defer log.Logf(1, "%s: kernel context loop terminated", kc.name)
if err := kc.serv.Listen(); err != nil {
return fmt.Errorf("failed to start rpc server: %w", err)
}
- eg, ctx := errgroup.WithContext(baseCtx)
- kc.ctx = ctx
+ eg, groupCtx := errgroup.WithContext(ctx)
+ kc.ctx = groupCtx
eg.Go(func() error {
defer log.Logf(1, "%s: rpc server terminaled", kc.name)
- return kc.serv.Serve(ctx)
+ return kc.serv.Serve(groupCtx)
})
eg.Go(func() error {
defer log.Logf(1, "%s: pool terminated", kc.name)
- kc.pool.Loop(ctx)
+ kc.pool.Loop(groupCtx)
return nil
})
eg.Go(func() error {
for {
select {
- case <-ctx.Done():
+ case <-groupCtx.Done():
return nil
case err := <-kc.pool.BootErrors:
title := "unknown"
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
index c406cb9ed..4a0dd5389 100644
--- a/pkg/rpcserver/local.go
+++ b/pkg/rpcserver/local.go
@@ -114,35 +114,35 @@ type local struct {
setupDone chan bool
}
-func (ctx *local) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) (queue.Source, error) {
- <-ctx.setupDone
- ctx.serv.TriagedCorpus()
- return ctx.cfg.MachineChecked(features, syscalls), nil
+func (l *local) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) (queue.Source, error) {
+ <-l.setupDone
+ l.serv.TriagedCorpus()
+ return l.cfg.MachineChecked(features, syscalls), nil
}
-func (ctx *local) BugFrames() ([]string, []string) {
+func (l *local) BugFrames() ([]string, []string) {
return nil, nil
}
-func (ctx *local) MaxSignal() signal.Signal {
- return signal.FromRaw(ctx.cfg.MaxSignal, 0)
+func (l *local) MaxSignal() signal.Signal {
+ return signal.FromRaw(l.cfg.MaxSignal, 0)
}
-func (ctx *local) CoverageFilter(modules []*vminfo.KernelModule) ([]uint64, error) {
- return ctx.cfg.CoverFilter, nil
+func (l *local) CoverageFilter(modules []*vminfo.KernelModule) ([]uint64, error) {
+ return l.cfg.CoverFilter, nil
}
-func (ctx *local) Serve(context context.Context) error {
- return ctx.serv.Serve(context)
+func (l *local) Serve(ctx context.Context) error {
+ return l.serv.Serve(ctx)
}
-func (ctx *local) RunInstance(baseCtx context.Context, id int) error {
- connErr := ctx.serv.CreateInstance(id, nil, nil)
- defer ctx.serv.ShutdownInstance(id, true)
+func (l *local) RunInstance(ctx context.Context, id int) error {
+ connErr := l.serv.CreateInstance(id, nil, nil)
+ defer l.serv.ShutdownInstance(id, true)
- cfg := ctx.cfg
+ cfg := l.cfg
bin := cfg.Executor
- args := []string{"runner", fmt.Sprint(id), "localhost", fmt.Sprint(ctx.serv.Port())}
+ args := []string{"runner", fmt.Sprint(id), "localhost", fmt.Sprint(l.serv.Port())}
if cfg.GDB {
bin = "gdb"
args = append([]string{
@@ -152,7 +152,7 @@ func (ctx *local) RunInstance(baseCtx context.Context, id int) error {
cfg.Executor,
}, args...)
}
- cmd := exec.CommandContext(baseCtx, bin, args...)
+ cmd := exec.CommandContext(ctx, bin, args...)
cmd.Dir = cfg.Dir
if cfg.OutputWriter != nil {
cmd.Stdout = cfg.OutputWriter
@@ -169,7 +169,7 @@ func (ctx *local) RunInstance(baseCtx context.Context, id int) error {
}
var retErr error
select {
- case <-baseCtx.Done():
+ case <-ctx.Done():
case err := <-connErr:
if err != nil {
retErr = fmt.Errorf("connection error: %w", err)
@@ -181,7 +181,7 @@ func (ctx *local) RunInstance(baseCtx context.Context, id int) error {
retErr = fmt.Errorf("executor process exited: %w", err)
}
// Note that we ignore the error if we killed the process because of the context.
- if baseCtx.Err() == nil {
+ if ctx.Err() == nil {
return retErr
}
return nil
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index b0ab14c17..04c5d877f 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -408,11 +408,11 @@ func (serv *server) handleMachineInfo(infoReq *flatrpc.InfoRequestRawT) (handsha
}, nil
}
-func (serv *server) connectionLoop(baseCtx context.Context, runner *Runner) error {
+func (serv *server) connectionLoop(ctx context.Context, runner *Runner) error {
// To "cancel" the runner's loop we need to call runner.Stop().
// At the same time, we don't want to leak the goroutine that monitors it,
// so we derive a new context and cancel it on function exit.
- ctx, cancel := context.WithCancel(baseCtx)
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
<-ctx.Done()
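
The comment in connectionLoop describes a common pattern for components that are stopped by a method call rather than by a context: derive a child context, cancel it on return, and let a small goroutine translate cancellation into the stop call. A hedged sketch with hypothetical names (stopLoop, run, stop), not taken from the rpcserver code:

package example

import "context"

// stopLoop illustrates the pattern: the loop itself knows nothing about
// contexts and is cancelled by calling stop(), so we derive a context and
// cancel it on exit to make sure the monitoring goroutine never leaks.
func stopLoop(ctx context.Context, stop func(), run func() error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // guarantees the goroutine below terminates when we return
	go func() {
		<-ctx.Done()
		stop()
	}()
	return run()
}
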
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index 7b3f1517f..716ebbc56 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -73,21 +73,21 @@ type Context struct {
buildSem chan bool
}
-func (ctx *Context) Init() {
+func (rt *Context) Init() {
// Run usually runs in a separate goroutine concurrently with request consumer (Next calls),
// so at least executor needs to be initialized before Run.
- ctx.executor = queue.DynamicOrder()
- ctx.buildSem = make(chan bool, runtime.GOMAXPROCS(0))
+ rt.executor = queue.DynamicOrder()
+ rt.buildSem = make(chan bool, runtime.GOMAXPROCS(0))
}
-func (ctx *Context) log(msg string, args ...any) {
- ctx.LogFunc(fmt.Sprintf(msg, args...))
+func (rt *Context) log(msg string, args ...any) {
+ rt.LogFunc(fmt.Sprintf(msg, args...))
}
-func (ctx *Context) Run(waitCtx context.Context) error {
- ctx.generatePrograms()
+func (rt *Context) Run(ctx context.Context) error {
+ rt.generatePrograms()
var ok, fail, broken, skip int
- for _, req := range ctx.requests {
+ for _, req := range rt.requests {
result := ""
verbose := false
if req.broken != "" {
@@ -103,7 +103,7 @@ func (ctx *Context) Run(waitCtx context.Context) error {
result = fmt.Sprintf("SKIP (%v)", req.skip)
verbose = true
} else {
- req.Request.Wait(waitCtx)
+ req.Request.Wait(ctx)
if req.err != nil {
fail++
result = fmt.Sprintf("FAIL: %v",
@@ -117,25 +117,25 @@ func (ctx *Context) Run(waitCtx context.Context) error {
result = "OK"
}
}
- if !verbose || ctx.Verbose {
- ctx.log("%-38v: %v", req.name, result)
+ if !verbose || rt.Verbose {
+ rt.log("%-38v: %v", req.name, result)
}
if req.Request != nil && req.Type == flatrpc.RequestTypeBinary && req.BinaryFile != "" {
os.Remove(req.BinaryFile)
}
}
- ctx.log("ok: %v, broken: %v, skip: %v, fail: %v", ok, broken, skip, fail)
+ rt.log("ok: %v, broken: %v, skip: %v, fail: %v", ok, broken, skip, fail)
if fail != 0 {
return fmt.Errorf("tests failed")
}
return nil
}
-func (ctx *Context) Next() *queue.Request {
- return ctx.executor.Next()
+func (rt *Context) Next() *queue.Request {
+ return rt.executor.Next()
}
-func (ctx *Context) onDone(req *runRequest, res *queue.Result) bool {
+func (rt *Context) onDone(req *runRequest, res *queue.Result) bool {
// The tests depend on timings and may be flaky, esp on overloaded/slow machines.
// We don't want to fix this by significantly bumping all timeouts,
// because if a program fails all the time with the default timeouts,
@@ -163,37 +163,37 @@ func (ctx *Context) onDone(req *runRequest, res *queue.Result) bool {
}
// We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
// which simplifies to `failed * 2 + 1`.
- retries := ctx.Retries
+ retries := rt.Retries
if retries%2 == 0 {
retries++
}
if req.failed*2+1 <= retries {
// We can still retry the execution.
- ctx.submit(req)
+ rt.submit(req)
return false
}
// Give up and fail on this request.
return true
}
-func (ctx *Context) generatePrograms() error {
+func (rt *Context) generatePrograms() error {
cover := []bool{false}
- if ctx.Features&flatrpc.FeatureCoverage != 0 {
+ if rt.Features&flatrpc.FeatureCoverage != 0 {
cover = append(cover, true)
}
var sandboxes []string
- for sandbox := range ctx.EnabledCalls {
+ for sandbox := range rt.EnabledCalls {
sandboxes = append(sandboxes, sandbox)
}
sort.Strings(sandboxes)
- files, err := progFileList(ctx.Dir, ctx.Tests)
+ files, err := progFileList(rt.Dir, rt.Tests)
if err != nil {
return err
}
for _, file := range files {
- if err := ctx.generateFile(sandboxes, cover, file); err != nil {
+ if err := rt.generateFile(sandboxes, cover, file); err != nil {
// Treat invalid programs as failing.
- ctx.createTest(&runRequest{
+ rt.createTest(&runRequest{
name: file,
failing: err.Error(),
})
@@ -220,21 +220,21 @@ func progFileList(dir, filter string) ([]string, error) {
return res, nil
}
-func (ctx *Context) generateFile(sandboxes []string, cover []bool, filename string) error {
- p, requires, results, err := parseProg(ctx.Target, ctx.Dir, filename, nil)
+func (rt *Context) generateFile(sandboxes []string, cover []bool, filename string) error {
+ p, requires, results, err := parseProg(rt.Target, rt.Dir, filename, nil)
if err != nil {
return err
}
if p == nil {
return nil
}
- sysTarget := targets.Get(ctx.Target.OS, ctx.Target.Arch)
+ sysTarget := targets.Get(rt.Target.OS, rt.Target.Arch)
nextSandbox:
for _, sandbox := range sandboxes {
name := fmt.Sprintf("%v %v", filename, sandbox)
for _, call := range p.Calls {
- if !ctx.EnabledCalls[sandbox][call.Meta] {
- ctx.createTest(&runRequest{
+ if !rt.EnabledCalls[sandbox][call.Meta] {
+ rt.createTest(&runRequest{
name: name,
skip: fmt.Sprintf("unsupported call %v", call.Meta.Name),
})
@@ -242,10 +242,10 @@ nextSandbox:
}
}
properties := map[string]bool{
- "manual": ctx.Tests != "", // "manual" tests run only if selected by the filter explicitly.
- "sandbox=" + sandbox: true,
- "bigendian": sysTarget.BigEndian,
- "arch=" + ctx.Target.Arch: true,
+ "manual": rt.Tests != "", // "manual" tests run only if selected by the filter explicitly.
+ "sandbox=" + sandbox: true,
+ "bigendian": sysTarget.BigEndian,
+ "arch=" + rt.Target.Arch: true,
}
for _, threaded := range []bool{false, true} {
if threaded {
@@ -271,11 +271,11 @@ nextSandbox:
properties["cover"] = cov
properties["C"] = false
properties["executor"] = true
- req, err := ctx.createSyzTest(p, sandbox, threaded, cov)
+ req, err := rt.createSyzTest(p, sandbox, threaded, cov)
if err != nil {
return err
}
- ctx.produceTest(req, name, properties, requires, results)
+ rt.produceTest(req, name, properties, requires, results)
}
if sysTarget.HostFuzzer {
// For HostFuzzer mode, we need to cross-compile
@@ -287,17 +287,17 @@ nextSandbox:
name += " C"
if !sysTarget.ExecutorUsesForkServer && times > 1 {
// Non-fork loop implementation does not support repetition.
- ctx.createTest(&runRequest{
+ rt.createTest(&runRequest{
name: name,
broken: "non-forking loop",
})
continue
}
- req, err := ctx.createCTest(p, sandbox, threaded, times)
+ req, err := rt.createCTest(p, sandbox, threaded, times)
if err != nil {
return err
}
- ctx.produceTest(req, name, properties, requires, results)
+ rt.produceTest(req, name, properties, requires, results)
}
}
}
@@ -368,37 +368,37 @@ func parseProg(target *prog.Target, dir, filename string, requires map[string]bo
return p, properties, info, nil
}
-func (ctx *Context) produceTest(req *runRequest, name string, properties,
+func (rt *Context) produceTest(req *runRequest, name string, properties,
requires map[string]bool, results *flatrpc.ProgInfo) {
req.name = name
req.results = results
if !manager.MatchRequirements(properties, requires) {
req.skip = "excluded by constraints"
}
- ctx.createTest(req)
+ rt.createTest(req)
}
-func (ctx *Context) createTest(req *runRequest) {
- req.executor = ctx.executor.Append()
- ctx.requests = append(ctx.requests, req)
+func (rt *Context) createTest(req *runRequest) {
+ req.executor = rt.executor.Append()
+ rt.requests = append(rt.requests, req)
if req.skip != "" || req.broken != "" || req.failing != "" {
return
}
if req.sourceOpts == nil {
- ctx.submit(req)
+ rt.submit(req)
return
}
go func() {
- ctx.buildSem <- true
+ rt.buildSem <- true
defer func() {
- <-ctx.buildSem
+ <-rt.buildSem
}()
src, err := csource.Write(req.Prog, *req.sourceOpts)
if err != nil {
req.err = fmt.Errorf("failed to create C source: %w", err)
req.Request.Done(&queue.Result{})
}
- bin, err := csource.Build(ctx.Target, src)
+ bin, err := csource.Build(rt.Target, src)
if err != nil {
req.err = fmt.Errorf("failed to build C program: %w", err)
req.Request.Done(&queue.Result{})
@@ -406,18 +406,18 @@ func (ctx *Context) createTest(req *runRequest) {
}
req.Type = flatrpc.RequestTypeBinary
req.BinaryFile = bin
- ctx.submit(req)
+ rt.submit(req)
}()
}
-func (ctx *Context) submit(req *runRequest) {
+func (rt *Context) submit(req *runRequest) {
req.OnDone(func(_ *queue.Request, res *queue.Result) bool {
- return ctx.onDone(req, res)
+ return rt.onDone(req, res)
})
req.executor.Submit(req.Request)
}
-func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool) (*runRequest, error) {
+func (rt *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool) (*runRequest, error) {
var opts flatrpc.ExecOpts
sandboxFlags, err := flatrpc.SandboxToFlags(sandbox)
if err != nil {
@@ -432,8 +432,8 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
opts.ExecFlags |= flatrpc.ExecFlagCollectCover
}
- opts.EnvFlags |= csource.FeaturesToFlags(ctx.Features, nil)
- if ctx.Debug {
+ opts.EnvFlags |= csource.FeaturesToFlags(rt.Features, nil)
+ if rt.Debug {
opts.EnvFlags |= flatrpc.ExecEnvDebug
}
req := &runRequest{
@@ -445,7 +445,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
return req, nil
}
-func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, times int) (*runRequest, error) {
+func (rt *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, times int) (*runRequest, error) {
opts := csource.Options{
Threaded: threaded,
Repeat: times > 1,
@@ -457,22 +457,22 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
HandleSegv: true,
Cgroups: p.Target.OS == targets.Linux && sandbox != "",
Trace: true,
- Swap: ctx.Features&flatrpc.FeatureSwap != 0,
+ Swap: rt.Features&flatrpc.FeatureSwap != 0,
}
if sandbox != "" {
- if ctx.Features&flatrpc.FeatureNetInjection != 0 {
+ if rt.Features&flatrpc.FeatureNetInjection != 0 {
opts.NetInjection = true
}
- if ctx.Features&flatrpc.FeatureNetDevices != 0 {
+ if rt.Features&flatrpc.FeatureNetDevices != 0 {
opts.NetDevices = true
}
- if ctx.Features&flatrpc.FeatureVhciInjection != 0 {
+ if rt.Features&flatrpc.FeatureVhciInjection != 0 {
opts.VhciInjection = true
}
- if ctx.Features&flatrpc.FeatureWifiEmulation != 0 {
+ if rt.Features&flatrpc.FeatureWifiEmulation != 0 {
opts.Wifi = true
}
- if ctx.Features&flatrpc.FeatureLRWPANEmulation != 0 {
+ if rt.Features&flatrpc.FeatureLRWPANEmulation != 0 {
opts.IEEE802154 = true
}
}