From c9820ab0fe2dce0914ff01bcaf3829ca82150eb2 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 20 Jan 2026 15:03:18 +0100 Subject: pkg/aflow: cache LLM requests Using cached replies is faster, cheaper, and more reliable. Especially handy during development when the same workflows are retried lots of times with some changes. --- pkg/aflow/func_tool_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'pkg/aflow/func_tool_test.go') diff --git a/pkg/aflow/func_tool_test.go b/pkg/aflow/func_tool_test.go index 429566dbe..2076e0bb9 100644 --- a/pkg/aflow/func_tool_test.go +++ b/pkg/aflow/func_tool_test.go @@ -8,6 +8,7 @@ import ( "errors" "path/filepath" "testing" + "time" "github.com/google/syzkaller/pkg/aflow/trajectory" "github.com/stretchr/testify/assert" @@ -103,7 +104,7 @@ func TestToolErrors(t *testing.T) { } ctx := context.WithValue(context.Background(), stubContextKey, stub) workdir := t.TempDir() - cache, err := newTestCache(t, filepath.Join(workdir, "cache"), 0, stub.timeNow) + cache, err := newTestCache(t, filepath.Join(workdir, "cache"), 0, time.Now) require.NoError(t, err) onEvent := func(span *trajectory.Span) error { return nil } _, err = flows["test"].Execute(ctx, "", workdir, nil, cache, onEvent) -- cgit mrf-deployment