path: root/pkg/aflow/execute.go
blob: 08593b30cd397696587fdb266a27c19bd1cba269
// Copyright 2025 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package aflow

import (
	"context"
	"errors"
	"fmt"
	"maps"
	"os"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/google/syzkaller/pkg/aflow/trajectory"
	"github.com/google/syzkaller/pkg/osutil"
	"google.golang.org/genai"
)

// Execute executes the given AI workflow with the provided inputs and returns the workflow outputs.
// The model argument overrides the Gemini models used to execute LLM agents;
// if it's not set, the default model for each agent is used.
// The workdir argument should point to a dir owned by aflow for storing private data;
// it can be shared across parallel executions in the same process, and should preferably
// be preserved across process restarts for caching purposes.
func (flow *Flow) Execute(ctx context.Context, model, workdir string, inputs map[string]any,
	cache *Cache, onEvent onEvent) (map[string]any, error) {
	if err := flow.checkInputs(inputs); err != nil {
		return nil, fmt.Errorf("flow inputs are missing: %w", err)
	}
	inputs = maps.Clone(inputs)
	maps.Insert(inputs, maps.All(flow.Consts))
	c := &Context{
		Context:  ctx,
		Workdir:  osutil.Abs(workdir),
		llmModel: model,
		cache:    cache,
		state:    inputs,
		onEvent:  onEvent,
	}

	defer c.Close()
	if s := ctx.Value(stubContextKey); s != nil {
		c.stubContext = *s.(*stubContext)
	}
	if c.timeNow == nil {
		c.timeNow = time.Now
	}
	if c.generateContent == nil {
		c.generateContent = c.generateContentGemini
	}
	span := &trajectory.Span{
		Type: trajectory.SpanFlow,
		Name: flow.Name,
	}
	if err := c.startSpan(span); err != nil {
		return nil, err
	}
	flowErr := flow.Root.execute(c)
	if flowErr == nil {
		span.Results = flow.extractOutputs(c.state)
	}
	if err := c.finishSpan(span, flowErr); err != nil {
		return nil, err
	}
	if c.spanNesting != 0 {
		// Since we finish all spans, even on errors, we should end up at 0.
		panic(fmt.Sprintf("unbalanced spans (%v)", c.spanNesting))
	}
	return span.Results, nil
}
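
// A minimal usage sketch (the "task" input key, taskDescription, and the logging
// callback are illustrative; flow, ctx, and cache come from the caller):
//
//	outputs, err := flow.Execute(ctx, "", "/var/cache/aflow",
//		map[string]any{"task": taskDescription}, cache,
//		func(span *trajectory.Span) error {
//			log.Printf("span %v %q err=%q", span.Seq, span.Name, span.Error)
//			return nil
//		})
//	if err != nil {
//		return err
//	}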

// FlowError creates an error that denotes a failure of the flow itself,
// rather than an infrastructure error. A flow error means an expected
// condition in the flow under which it cannot continue and cannot produce
// the expected outputs. For example, if we are doing something with the kernel,
// but the kernel build fails. Flow errors shouldn't be flagged in
// infrastructure monitoring.
func FlowError(err error) error {
	return &flowError{err}
}
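
// Usage sketch (buildKernel is a hypothetical flow step):
//
//	if err := buildKernel(); err != nil {
//		// An expected failure mode of the flow, not an infrastructure problem.
//		return FlowError(fmt.Errorf("kernel build failed: %w", err))
//	}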

// IsFlowError reports whether err is (or wraps) an error created by FlowError.
func IsFlowError(err error) bool {
	var flowErr *flowError
	return errors.As(err, &flowErr)
}

type flowError struct {
	error
}

// IsModelQuotaError returns the name of the model that is over its daily quota
// if err is (or wraps) a model quota error, or "" otherwise.
func IsModelQuotaError(err error) string {
	var quotaErr *modelQuotaError
	if errors.As(err, &quotaErr) {
		return quotaErr.model
	}
	return ""
}

type modelQuotaError struct {
	model string
}

func (err *modelQuotaError) Error() string {
	return fmt.Sprintf("model %q is over daily quota", err.model)
}

func isTokenOverflowError(err error) bool {
	var overflowErr *tokenOverflowError
	return errors.As(err, &overflowErr)
}

type tokenOverflowError struct {
	error
}

// QuotaResetTime returns the time when the RPD quota will be reset
// for a quota overflow that happened at time t.
func QuotaResetTime(t time.Time) time.Time {
	// Requests per day (RPD) quotas reset at midnight Pacific time:
	// https://ai.google.dev/gemini-api/docs/rate-limits
	// To account for potential delays in the reset logic, we add a small delta (5 mins)
	// to avoid a situation where we reset the quota at exactly midnight locally,
	// but it's not yet reset on the server.
	// We also assume that any rate limiting errors at the very beginning of the day
	// (within the first seconds/minutes) actually belong to the previous day
	// (we couldn't have overflowed the quota within that period).
	t = t.In(pacificLoc)
	resetTime := time.Date(t.Year(), t.Month(), t.Day(), 0, 5, 0, 0, pacificLoc)
	if t.After(resetTime) {
		resetTime = resetTime.Add(24 * time.Hour)
		if t.After(resetTime) {
			panic(fmt.Sprintf("%v > %v", t, resetTime))
		}
	}
	return resetTime.UTC()
}
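
// A worked sketch of reacting to a quota error (the sleep-based backoff is
// illustrative, not how aflow itself handles these errors). For example, an
// overflow at 23:50 Pacific resets at 00:05 Pacific the next day, while an
// overflow at 00:02 Pacific is attributed to the previous day and resets
// at 00:05 Pacific the same day:
//
//	if model := IsModelQuotaError(err); model != "" {
//		time.Sleep(time.Until(QuotaResetTime(time.Now())))
//	}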

var pacificLoc = func() *time.Location {
	loc, err := time.LoadLocation("US/Pacific")
	if err != nil {
		panic(err)
	}
	return loc
}()

type (
	onEvent        func(*trajectory.Span) error
	contextKeyType int
)

var (
	createClientOnce sync.Once
	createClientErr  error
	client           *genai.Client
	modelList        map[string]*modelInfo
	stubContextKey   = contextKeyType(1)
)

type modelInfo struct {
	Thinking         bool
	MaxTemperature   float32
	InputTokenLimit  int
	OutputTokenLimit int
}

func (ctx *Context) generateContentGemini(model string, cfg *genai.GenerateContentConfig,
	req []*genai.Content) (*genai.GenerateContentResponse, error) {
	createClientOnce.Do(func() {
		client, modelList, createClientErr = loadModelList(ctx.Context)
	})
	if createClientErr != nil {
		return nil, createClientErr
	}
	info := modelList[model]
	if info == nil {
		models := slices.Collect(maps.Keys(modelList))
		slices.Sort(models)
		return nil, fmt.Errorf("model %q does not exist (models: %v)", model, models)
	}
	*cfg.Temperature = min(*cfg.Temperature, info.MaxTemperature)
	if info.Thinking {
		// Don't alter the original object (that may affect request caching).
		cfgCopy := *cfg
		cfg = &cfgCopy
		cfg.ThinkingConfig = &genai.ThinkingConfig{
			// We capture them in the trajectory for analysis.
			IncludeThoughts: true,
			// Enable "dynamic thinking" ("the model will adjust the budget based on the complexity of the request").
			// See https://ai.google.dev/gemini-api/docs/thinking#set-budget
			// However, thoughts output also consumes total output token budget.
			// We may consider adjusting ThinkingLevel parameter.
			ThinkingBudget: genai.Ptr[int32](-1),
		}
	}
	// Sometimes LLM requests just hang dead for tens of minutes;
	// abort them after 10 minutes and retry. We don't stream reply tokens,
	// so some large requests can legitimately take several minutes.
	timedCtx, cancel := context.WithTimeout(ctx.Context, 10*time.Minute)
	defer cancel()
	resp, err := client.Models.GenerateContent(timedCtx, modelPrefix+model, req, cfg)
	if err != nil && timedCtx.Err() == context.DeadlineExceeded {
		return nil, &retryError{time.Second, err}
	}
	return resp, err
}

const modelPrefix = "models/"

// loadModelList creates the shared Gemini client and enumerates the available
// text generation models (skipping image/audio models).
func loadModelList(ctx context.Context) (*genai.Client, map[string]*modelInfo, error) {
	if os.Getenv("GOOGLE_API_KEY") == "" {
		return nil, nil, fmt.Errorf("set GOOGLE_API_KEY env var to use with Gemini" +
			" (see https://ai.google.dev/gemini-api/docs/api-key)")
	}
	client, err := genai.NewClient(ctx, nil)
	if err != nil {
		return nil, nil, err
	}
	models := make(map[string]*modelInfo)
	for m, err := range client.Models.All(ctx) {
		if err != nil {
			return nil, nil, err
		}
		if !slices.Contains(m.SupportedActions, "generateContent") ||
			strings.Contains(m.Name, "-image") ||
			strings.Contains(m.Name, "-audio") {
			continue
		}
		models[strings.TrimPrefix(m.Name, modelPrefix)] = &modelInfo{
			Thinking:         m.Thinking,
			MaxTemperature:   m.MaxTemperature,
			InputTokenLimit:  int(m.InputTokenLimit),
			OutputTokenLimit: int(m.OutputTokenLimit),
		}
	}
	return client, models, nil
}

// Context carries the state of a single flow execution.
type Context struct {
	Context     context.Context
	Workdir     string
	llmModel    string
	cache       *Cache
	cachedDirs  []string
	tempDirs    []string
	state       map[string]any
	onEvent     onEvent
	spanSeq     int
	spanNesting int
	stubContext
}

// stubContext holds hooks that tests can inject (via stubContextKey)
// to stub the current time and LLM content generation.
type stubContext struct {
	timeNow         func() time.Time
	generateContent func(string, *genai.GenerateContentConfig, []*genai.Content) (
		*genai.GenerateContentResponse, error)
}

// modelName returns the model override passed to Execute if it's set,
// or the given agent-default model otherwise.
func (ctx *Context) modelName(model string) string {
	if ctx.llmModel != "" {
		return ctx.llmModel
	}
	return model
}

// Cache returns a dir of the given type/description from the flow cache,
// populating it via the populate callback when needed, and registers it
// for release when the flow finishes.
func (ctx *Context) Cache(typ, desc string, populate func(string) error) (string, error) {
	dir, err := ctx.cache.Create(typ, desc, populate)
	if err != nil {
		return "", err
	}
	ctx.cachedDirs = append(ctx.cachedDirs, dir)
	return dir, nil
}

// CacheObject is like Context.Cache, but caches an object produced
// by the populate callback rather than a dir.
func CacheObject[T any](ctx *Context, typ, desc string, populate func() (T, error)) (T, error) {
	dir, obj, err := cacheCreateObject(ctx.cache, typ, desc, populate)
	if err != nil {
		return obj, err
	}
	ctx.cachedDirs = append(ctx.cachedDirs, dir)
	return obj, nil
}
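
// A minimal usage sketch (the cache type/desc strings and loadSymbols are hypothetical):
//
//	symbols, err := CacheObject(ctx, "kernel-symbols", kernelCommit,
//		func() ([]string, error) {
//			// Expensive work; skipped when the object is already cached.
//			return loadSymbols(kernelCommit)
//		})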

// TempDir creates a new temp dir that will be automatically removed
// when the flow finishes, or on the next restart.
func (ctx *Context) TempDir() (string, error) {
	dir, err := ctx.cache.TempDir()
	if err != nil {
		return "", err
	}
	ctx.tempDirs = append(ctx.tempDirs, dir)
	return dir, nil
}

// Close releases all cached dirs and removes all temp dirs acquired during the execution.
func (ctx *Context) Close() {
	for _, dir := range ctx.cachedDirs {
		ctx.cache.Release(dir)
	}
	for _, dir := range ctx.tempDirs {
		os.RemoveAll(dir)
	}
}

// startSpan assigns the span's sequence number and nesting level,
// stamps its start time, and reports it via the onEvent callback.
func (ctx *Context) startSpan(span *trajectory.Span) error {
	span.Seq = ctx.spanSeq
	ctx.spanSeq++
	span.Nesting = ctx.spanNesting
	ctx.spanNesting++
	span.Started = ctx.timeNow()
	return ctx.onEvent(span)
}

// finishSpan stamps the span's finish time (and error, if any), reports it
// via the onEvent callback, and gives spanErr precedence over the callback error.
func (ctx *Context) finishSpan(span *trajectory.Span, spanErr error) error {
	ctx.spanNesting--
	if ctx.spanNesting < 0 {
		panic("unbalanced spans")
	}
	span.Finished = ctx.timeNow()
	if spanErr != nil {
		span.Error = spanErr.Error()
	}
	err := ctx.onEvent(span)
	if spanErr != nil {
		err = spanErr
	}
	return err
}
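
// Spans must be strictly paired, even on errors; a typical caller sketch
// (executeStep and the span name are illustrative):
//
//	span := &trajectory.Span{Type: trajectory.SpanFlow, Name: "step"}
//	if err := ctx.startSpan(span); err != nil {
//		return err
//	}
//	stepErr := executeStep(ctx)
//	// Finish the span even on error to keep nesting balanced.
//	return ctx.finishSpan(span, stepErr)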