From 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 Mon Sep 17 00:00:00 2001
From: Dmitry Vyukov
Date: Thu, 15 Jan 2026 20:53:57 +0100
Subject: pkg/aflow: make LLM model per-agent rather than per-flow

Having the LLM model per-agent is even more flexible than per-flow.
We can run the more complex tasks during patch generation with the
most capable model, and the simpler ones with less capable models.
---
 syz-agent/agent.go | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)
(limited to 'syz-agent')

diff --git a/syz-agent/agent.go b/syz-agent/agent.go
index 54d6a67c6..d070144db 100644
--- a/syz-agent/agent.go
+++ b/syz-agent/agent.go
@@ -171,14 +171,9 @@ func (s *Server) poll(ctx context.Context) (
 		CodeRevision: prog.GitRevision,
 	}
 	for _, flow := range aflow.Flows {
-		model := flow.Model
-		if s.cfg.Model != "" {
-			model = s.cfg.Model
-		}
 		req.Workflows = append(req.Workflows, dashapi.AIWorkflow{
-			Type:     flow.Type,
-			Name:     flow.Name,
-			LLMModel: model,
+			Type: flow.Type,
+			Name: flow.Name,
 		})
 	}
 	resp, err := s.dash.AIJobPoll(req)
@@ -210,10 +205,6 @@ func (s *Server) executeJob(ctx context.Context, req *dashapi.AIJobPollResp) (ma
 	if flow == nil {
 		return nil, fmt.Errorf("unsupported flow %q", req.Workflow)
 	}
-	model := flow.Model
-	if s.cfg.Model != "" {
-		model = s.cfg.Model
-	}
 	inputs := map[string]any{
 		"Syzkaller":         osutil.Abs(filepath.FromSlash("syzkaller/current")),
 		"CodesearchToolBin": s.cfg.CodesearchToolBin,
@@ -230,5 +221,5 @@ func (s *Server) executeJob(ctx context.Context, req *dashapi.AIJobPollResp) (ma
 			Span: span,
 		})
 	}
-	return flow.Execute(ctx, model, s.workdir, inputs, s.cache, onEvent)
+	return flow.Execute(ctx, s.cfg.Model, s.workdir, inputs, s.cache, onEvent)
 }
--
cgit mrf-deployment
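
The diff above removes the per-flow model override from syz-agent and
passes the agent config's default model (s.cfg.Model) straight into
flow.Execute; the per-agent model selection itself lives in pkg/aflow,
which this diff does not touch. Below is a minimal sketch of what that
selection could look like, assuming hypothetical Agent and Flow types
and a resolveModel helper; none of these names are confirmed by the
patch:

	package aflow

	// Agent describes one step of a flow. Model, when set, names the
	// LLM model this agent's task prefers (hypothetical field).
	type Agent struct {
		Name  string
		Model string
	}

	// Flow groups agents. Execute receives the default model that
	// syz-agent passes in (s.cfg.Model in the diff above).
	type Flow struct {
		Type   string
		Name   string
		Agents []Agent
	}

	// resolveModel shows one plausible resolution order: prefer the
	// per-agent model and fall back to the configured default.
	func resolveModel(a Agent, defaultModel string) string {
		if a.Model != "" {
			return a.Model
		}
		return defaultModel
	}

With such a scheme, a complex patch-generation agent could pin the most
capable model while simpler agents in the same flow fall back to the
configured default.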