diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2026-01-15 20:53:57 +0100 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2026-01-20 21:12:57 +0000 |
| commit | 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 (patch) | |
| tree | 2ccbc85132a170d046837de6bdd8be3317f94060 /tools/syz-aflow/aflow.go | |
| parent | 2494e18d5ced59fc7f0522749041e499d3082a9e (diff) | |
pkg/aflow: make LLM model per-agent rather than per-flow
Having the LLM model per-agent is even more flexible than per-flow.
We can have some more complex tasks during patch generation with the most elaborate model,
but also some simpler ones with less elaborate models.
Diffstat (limited to 'tools/syz-aflow/aflow.go')
| -rw-r--r-- | tools/syz-aflow/aflow.go | 5 |
1 file changed, 1 insertion, 4 deletions
diff --git a/tools/syz-aflow/aflow.go b/tools/syz-aflow/aflow.go
index d915c2061..160d4541c 100644
--- a/tools/syz-aflow/aflow.go
+++ b/tools/syz-aflow/aflow.go
@@ -33,7 +33,7 @@ func main() {
 	flagFlow = flag.String("workflow", "", "workflow to execute")
 	flagInput = flag.String("input", "", "input json file with workflow arguments")
 	flagWorkdir = flag.String("workdir", "", "directory for kernel checkout, kernel builds, etc")
-	flagModel = flag.String("model", "", "use this LLM model, if empty use the workflow default model")
+	flagModel = flag.String("model", "", "use this LLM model, if empty use default models")
 	flagCacheSize = flag.String("cache-size", "10GB", "max cache size (e.g. 100MB, 5GB, 1TB)")
 	flagDownloadBug = flag.String("download-bug", "", "extid of a bug to download from the dashboard"+
 		" and save into -input file")
@@ -78,9 +78,6 @@ func run(ctx context.Context, model, flowName, inputFile, workdir string, cacheS
 	if flow == nil {
 		return fmt.Errorf("workflow %q is not found", flowName)
 	}
-	if model == "" {
-		model = flow.Model
-	}
 	inputData, err := os.ReadFile(inputFile)
 	if err != nil {
 		return fmt.Errorf("failed to open -input file: %w", err)
