From 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Thu, 15 Jan 2026 20:53:57 +0100 Subject: pkg/aflow: make LLM model per-agent rather than per-flow Having the LLM model per-agent is even more flexible than per-flow. We can use the most elaborate model for the more complex tasks during patch generation, and less elaborate models for simpler ones. --- tools/syz-aflow/aflow.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'tools/syz-aflow/aflow.go') diff --git a/tools/syz-aflow/aflow.go b/tools/syz-aflow/aflow.go index d915c2061..160d4541c 100644 --- a/tools/syz-aflow/aflow.go +++ b/tools/syz-aflow/aflow.go @@ -33,7 +33,7 @@ func main() { flagFlow = flag.String("workflow", "", "workflow to execute") flagInput = flag.String("input", "", "input json file with workflow arguments") flagWorkdir = flag.String("workdir", "", "directory for kernel checkout, kernel builds, etc") - flagModel = flag.String("model", "", "use this LLM model, if empty use the workflow default model") + flagModel = flag.String("model", "", "use this LLM model, if empty use default models") flagCacheSize = flag.String("cache-size", "10GB", "max cache size (e.g. 100MB, 5GB, 1TB)") flagDownloadBug = flag.String("download-bug", "", "extid of a bug to download from the dashboard"+ " and save into -input file") @@ -78,9 +78,6 @@ func run(ctx context.Context, model, flowName, inputFile, workdir string, cacheS if flow == nil { return fmt.Errorf("workflow %q is not found", flowName) } - if model == "" { - model = flow.Model - } inputData, err := os.ReadFile(inputFile) if err != nil { return fmt.Errorf("failed to open -input file: %w", err) -- cgit mrf-deployment