diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2026-01-15 20:53:57 +0100 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2026-01-20 21:12:57 +0000 |
| commit | 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 (patch) | |
| tree | 2ccbc85132a170d046837de6bdd8be3317f94060 /syz-agent | |
| parent | 2494e18d5ced59fc7f0522749041e499d3082a9e (diff) | |
pkg/aflow: make LLM model per-agent rather than per-flow
Having the LLM model configured per-agent is even more flexible than per-flow.
We can run some of the more complex tasks during patch generation with the most elaborate model,
while running simpler ones with less elaborate models.
Diffstat (limited to 'syz-agent')
| -rw-r--r-- | syz-agent/agent.go | 15 |
1 file changed, 3 insertions, 12 deletions
diff --git a/syz-agent/agent.go b/syz-agent/agent.go index 54d6a67c6..d070144db 100644 --- a/syz-agent/agent.go +++ b/syz-agent/agent.go @@ -171,14 +171,9 @@ func (s *Server) poll(ctx context.Context) ( CodeRevision: prog.GitRevision, } for _, flow := range aflow.Flows { - model := flow.Model - if s.cfg.Model != "" { - model = s.cfg.Model - } req.Workflows = append(req.Workflows, dashapi.AIWorkflow{ - Type: flow.Type, - Name: flow.Name, - LLMModel: model, + Type: flow.Type, + Name: flow.Name, }) } resp, err := s.dash.AIJobPoll(req) @@ -210,10 +205,6 @@ func (s *Server) executeJob(ctx context.Context, req *dashapi.AIJobPollResp) (ma if flow == nil { return nil, fmt.Errorf("unsupported flow %q", req.Workflow) } - model := flow.Model - if s.cfg.Model != "" { - model = s.cfg.Model - } inputs := map[string]any{ "Syzkaller": osutil.Abs(filepath.FromSlash("syzkaller/current")), "CodesearchToolBin": s.cfg.CodesearchToolBin, @@ -230,5 +221,5 @@ func (s *Server) executeJob(ctx context.Context, req *dashapi.AIJobPollResp) (ma Span: span, }) } - return flow.Execute(ctx, model, s.workdir, inputs, s.cache, onEvent) + return flow.Execute(ctx, s.cfg.Model, s.workdir, inputs, s.cache, onEvent) } |
