diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2026-01-15 20:53:57 +0100 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2026-01-20 21:12:57 +0000 |
| commit | 7f5908e77ae0e7fef4b7901341b8c2c4bbb74b28 (patch) | |
| tree | 2ccbc85132a170d046837de6bdd8be3317f94060 /dashboard/app/aidb/crud.go | |
| parent | 2494e18d5ced59fc7f0522749041e499d3082a9e (diff) | |
pkg/aflow: make LLM model per-agent rather than per-flow
Having the LLM model per-agent is even more flexible than having it per-flow.
We can run the more complex tasks during patch generation with the most capable model,
while handling simpler tasks with less capable models.
Diffstat (limited to 'dashboard/app/aidb/crud.go')
| -rw-r--r-- | dashboard/app/aidb/crud.go | 7 |
1 file changed, 1 insertion, 6 deletions
diff --git a/dashboard/app/aidb/crud.go b/dashboard/app/aidb/crud.go index 4f7e93f6a..872a70ace 100644 --- a/dashboard/app/aidb/crud.go +++ b/dashboard/app/aidb/crud.go @@ -129,12 +129,6 @@ func StartJob(ctx context.Context, req *dashapi.AIJobPollReq) (*Job, error) { job = jobs[0] } job.Started = spanner.NullTime{Time: TimeNow(ctx), Valid: true} - for _, flow := range req.Workflows { - if job.Workflow == flow.Name { - job.LLMModel = flow.LLMModel - break - } - } job.CodeRevision = req.CodeRevision mut, err := spanner.InsertOrUpdateStruct("Jobs", job) if err != nil { @@ -184,6 +178,7 @@ func StoreTrajectorySpan(ctx context.Context, jobID string, span *trajectory.Spa Nesting: int64(span.Nesting), Type: string(span.Type), Name: span.Name, + Model: span.Model, Started: span.Started, Finished: toNullTime(span.Finished), Error: toNullString(span.Error), |
