aboutsummaryrefslogtreecommitdiffstats
path: root/pkg/aflow/execute.go
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2026-01-20 15:03:18 +0100
committerDmitry Vyukov <dvyukov@google.com>2026-01-21 13:38:45 +0000
commitc9820ab0fe2dce0914ff01bcaf3829ca82150eb2 (patch)
tree350f597291bec42753754dc1948acf8166c5ba68 /pkg/aflow/execute.go
parentf1d5c3ecdec0b86db1df926ccd3553157988690d (diff)
pkg/aflow: cache LLM requests
Using cached replies is faster, cheaper, and more reliable. Especially handy during development when the same workflows are retried lots of times with some changes.
Diffstat (limited to 'pkg/aflow/execute.go')
-rw-r--r--pkg/aflow/execute.go3
1 file changed, 3 insertions, 0 deletions
diff --git a/pkg/aflow/execute.go b/pkg/aflow/execute.go
index 19f8d3aec..3e1a6a112 100644
--- a/pkg/aflow/execute.go
+++ b/pkg/aflow/execute.go
@@ -178,6 +178,9 @@ func (ctx *Context) generateContentGemini(model string, cfg *genai.GenerateConte
return nil, fmt.Errorf("model %q does not exist (models: %v)", model, models)
}
if thinking {
+ // Don't alter the original object (that may affect request caching).
+ cfgCopy := *cfg
+ cfg = &cfgCopy
cfg.ThinkingConfig = &genai.ThinkingConfig{
// We capture them in the trajectory for analysis.
IncludeThoughts: true,