From 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Thu, 15 Jan 2026 20:53:57 +0100 Subject: pkg/aflow: make LLM model per-agent rather than per-flow Having the LLM model per-agent is even more flexible than per-flow. We can have some more complex tasks during patch generation with the most elaborate model, but also some simpler ones with less elaborate models. --- pkg/aflow/flow/patching/patching.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'pkg/aflow/flow/patching') diff --git a/pkg/aflow/flow/patching/patching.go b/pkg/aflow/flow/patching/patching.go index 766cf089f..856962e6c 100644 --- a/pkg/aflow/flow/patching/patching.go +++ b/pkg/aflow/flow/patching/patching.go @@ -43,7 +43,6 @@ func init() { ai.WorkflowPatching, "generate a kernel patch fixing a provided bug reproducer", &aflow.Flow{ - Model: aflow.BestExpensiveModel, Root: &aflow.Pipeline{ Actions: []aflow.Action{ baseCommitPicker, @@ -54,6 +53,7 @@ func init() { codesearcher.PrepareIndex, &aflow.LLMAgent{ Name: "debugger", + Model: aflow.BestExpensiveModel, Reply: "BugExplanation", Temperature: 1, Instruction: debuggingInstruction, @@ -62,6 +62,7 @@ func init() { }, &aflow.LLMAgent{ Name: "diff-generator", + Model: aflow.BestExpensiveModel, Reply: "PatchDiff", Temperature: 1, Instruction: diffInstruction, @@ -70,6 +71,7 @@ func init() { }, &aflow.LLMAgent{ Name: "description-generator", + Model: aflow.BestExpensiveModel, Reply: "PatchDescription", Temperature: 1, Instruction: descriptionInstruction, -- cgit mrf-deployment