From 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Thu, 15 Jan 2026 20:53:57 +0100 Subject: pkg/aflow: make LLM model per-agent rather than per-flow Having the LLM model per-agent is even more flexible than per-flow. We can have some more complex tasks during patch generation with the most elaborate model, but also some simpler ones with less elaborate models. --- dashboard/dashapi/ai.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'dashboard/dashapi/ai.go') diff --git a/dashboard/dashapi/ai.go b/dashboard/dashapi/ai.go index 8134e5744..dfa410402 100644 --- a/dashboard/dashapi/ai.go +++ b/dashboard/dashapi/ai.go @@ -14,9 +14,8 @@ type AIJobPollReq struct { } type AIWorkflow struct { - Type ai.WorkflowType - Name string - LLMModel string // LLM model that will be used to execute this workflow + Type ai.WorkflowType + Name string } type AIJobPollResp struct { -- cgit mrf-deployment