aboutsummaryrefslogtreecommitdiffstats
path: root/pkg
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-06-04 12:55:41 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-06-24 09:57:34 +0000
commite16e2c9a4cb6937323e861b646792a6c4c978a3c (patch)
tree6c513e98e5f465b44a98546d8984485d2c128582 /pkg
parent90d67044dab68568e8f35bc14b68055dbd166eff (diff)
executor: add runner mode
Move all syz-fuzzer logic into syz-executor and remove syz-fuzzer. Also restore syz-runtest functionality in the manager. Update #4917 (sets most signal handlers to SIG_IGN)
Diffstat (limited to 'pkg')
-rw-r--r--pkg/csource/options.go58
-rw-r--r--pkg/flatrpc/conn.go60
-rw-r--r--pkg/flatrpc/conn_test.go67
-rw-r--r--pkg/flatrpc/flatrpc.fbs25
-rw-r--r--pkg/flatrpc/flatrpc.go537
-rw-r--r--pkg/flatrpc/flatrpc.h523
-rw-r--r--pkg/flatrpc/helpers.go34
-rw-r--r--pkg/fuzzer/fuzzer.go2
-rw-r--r--pkg/fuzzer/fuzzer_test.go176
-rw-r--r--pkg/fuzzer/job.go23
-rw-r--r--pkg/fuzzer/queue/queue.go27
-rw-r--r--pkg/host/features.go80
-rw-r--r--pkg/host/machine_info.go49
-rw-r--r--pkg/instance/instance.go49
-rw-r--r--pkg/ipc/gate.go76
-rw-r--r--pkg/ipc/ipc.go838
-rw-r--r--pkg/ipc/ipc_priv_test.go32
-rw-r--r--pkg/ipc/ipc_test.go262
-rw-r--r--pkg/ipc/ipcconfig/ipcconfig.go56
-rw-r--r--pkg/mgrconfig/load.go7
-rw-r--r--pkg/report/fuchsia.go2
-rw-r--r--pkg/report/linux.go6
-rw-r--r--pkg/report/testdata/fuchsia/report/630
-rw-r--r--pkg/rpcserver/last_executing.go68
-rw-r--r--pkg/rpcserver/last_executing_test.go56
-rw-r--r--pkg/rpcserver/local.go138
-rw-r--r--pkg/rpcserver/rpcserver.go796
-rw-r--r--pkg/runtest/executor_test.go131
-rw-r--r--pkg/runtest/run.go253
-rw-r--r--pkg/runtest/run_test.go175
-rw-r--r--pkg/vminfo/features.go20
-rw-r--r--pkg/vminfo/syscalls.go32
-rw-r--r--pkg/vminfo/vminfo.go17
-rw-r--r--pkg/vminfo/vminfo_test.go66
34 files changed, 2553 insertions, 2218 deletions
diff --git a/pkg/csource/options.go b/pkg/csource/options.go
index ba6dcfbed..ba44dd021 100644
--- a/pkg/csource/options.go
+++ b/pkg/csource/options.go
@@ -11,6 +11,7 @@ import (
"sort"
"strings"
+ "github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/sys/targets"
)
@@ -364,3 +365,60 @@ var ExecutorOpts = Options{
Sandbox: "none",
UseTmpDir: true,
}
+
+func FeaturesToFlags(features flatrpc.Feature, manual Features) flatrpc.ExecEnv {
+ for feat := range flatrpc.EnumNamesFeature {
+ opt := FlatRPCFeaturesToCSource[feat]
+ if opt != "" && manual != nil && !manual[opt].Enabled {
+ features &= ^feat
+ }
+ }
+ var flags flatrpc.ExecEnv
+ if manual == nil || manual["net_reset"].Enabled {
+ flags |= flatrpc.ExecEnvEnableNetReset
+ }
+ if manual == nil || manual["cgroups"].Enabled {
+ flags |= flatrpc.ExecEnvEnableCgroups
+ }
+ if manual == nil || manual["close_fds"].Enabled {
+ flags |= flatrpc.ExecEnvEnableCloseFds
+ }
+ if features&flatrpc.FeatureExtraCoverage != 0 {
+ flags |= flatrpc.ExecEnvExtraCover
+ }
+ if features&flatrpc.FeatureDelayKcovMmap != 0 {
+ flags |= flatrpc.ExecEnvDelayKcovMmap
+ }
+ if features&flatrpc.FeatureNetInjection != 0 {
+ flags |= flatrpc.ExecEnvEnableTun
+ }
+ if features&flatrpc.FeatureNetDevices != 0 {
+ flags |= flatrpc.ExecEnvEnableNetDev
+ }
+ if features&flatrpc.FeatureDevlinkPCI != 0 {
+ flags |= flatrpc.ExecEnvEnableDevlinkPCI
+ }
+ if features&flatrpc.FeatureNicVF != 0 {
+ flags |= flatrpc.ExecEnvEnableNicVF
+ }
+ if features&flatrpc.FeatureVhciInjection != 0 {
+ flags |= flatrpc.ExecEnvEnableVhciInjection
+ }
+ if features&flatrpc.FeatureWifiEmulation != 0 {
+ flags |= flatrpc.ExecEnvEnableWifi
+ }
+ return flags
+}
+
+var FlatRPCFeaturesToCSource = map[flatrpc.Feature]string{
+ flatrpc.FeatureNetInjection: "tun",
+ flatrpc.FeatureNetDevices: "net_dev",
+ flatrpc.FeatureDevlinkPCI: "devlink_pci",
+ flatrpc.FeatureNicVF: "nic_vf",
+ flatrpc.FeatureVhciInjection: "vhci",
+ flatrpc.FeatureWifiEmulation: "wifi",
+ flatrpc.FeatureUSBEmulation: "usb",
+ flatrpc.FeatureBinFmtMisc: "binfmt_misc",
+ flatrpc.FeatureLRWPANEmulation: "ieee802154",
+ flatrpc.FeatureSwap: "swap",
+}
diff --git a/pkg/flatrpc/conn.go b/pkg/flatrpc/conn.go
index ba028fe62..9d8d1ce59 100644
--- a/pkg/flatrpc/conn.go
+++ b/pkg/flatrpc/conn.go
@@ -8,10 +8,9 @@ import (
"fmt"
"io"
"net"
- "os"
+ "reflect"
"slices"
"sync"
- "time"
flatbuffers "github.com/google/flatbuffers/go"
"github.com/google/syzkaller/pkg/log"
@@ -50,7 +49,7 @@ func ListenAndServe(addr string, handler func(*Conn)) (*Serv, error) {
continue
}
go func() {
- c := newConn(conn)
+ c := NewConn(conn)
defer c.Close()
handler(c)
}()
@@ -77,22 +76,7 @@ type Conn struct {
lastMsg int
}
-func Dial(addr string, timeScale time.Duration) (*Conn, error) {
- var conn net.Conn
- var err error
- if addr == "stdin" {
- // This is used by vm/gvisor which passes us a unix socket connection in stdin.
- conn, err = net.FileConn(os.Stdin)
- } else {
- conn, err = net.DialTimeout("tcp", addr, time.Minute*timeScale)
- }
- if err != nil {
- return nil, err
- }
- return newConn(conn), nil
-}
-
-func newConn(conn net.Conn) *Conn {
+func NewConn(conn net.Conn) *Conn {
return &Conn{
conn: conn,
builder: flatbuffers.NewBuilder(0),
@@ -125,14 +109,31 @@ func Send[T sendMsg](c *Conn, msg T) error {
return nil
}
-// Recv received an RPC message.
-// The type T is supposed to be a normal flatbuffers type (not ending with T, e.g. ConnectRequest).
+// Recv receives an RPC message.
+// The type T is supposed to be a pointer to a normal flatbuffers type (not ending with T, e.g. *ConnectRequestRaw).
// Receiving should be done from a single goroutine, the received message is valid
// only until the next Recv call (messages share the same underlying receive buffer).
-func Recv[T any, PT interface {
- *T
+func Recv[Raw interface {
+ UnPack() *T
flatbuffers.FlatBuffer
-}](c *Conn) (*T, error) {
+}, T any](c *Conn) (res *T, err0 error) {
+ defer func() {
+ if err1 := recover(); err1 != nil {
+ if err2, ok := err1.(error); ok {
+ err0 = err2
+ } else {
+ err0 = fmt.Errorf("%v", err1)
+ }
+ }
+ }()
+ raw, err := RecvRaw[Raw](c)
+ if err != nil {
+ return nil, err
+ }
+ return raw.UnPack(), nil
+}
+
+func RecvRaw[T flatbuffers.FlatBuffer](c *Conn) (T, error) {
// First, discard the previous message.
// For simplicity we copy any data from the next message to the beginning of the buffer.
// Theoretically we could something more efficient, e.g. don't copy if we already
@@ -146,21 +147,24 @@ func Recv[T any, PT interface {
sizePrefixSize = flatbuffers.SizeUint32
maxMessageSize = 64 << 20
)
- msg := PT(new(T))
+ var msg T
// Then, receive at least the size prefix (4 bytes).
// And then the full message, if we have not got it yet.
if err := c.recv(sizePrefixSize); err != nil {
- return nil, fmt.Errorf("failed to recv %T: %w", msg, err)
+ return msg, fmt.Errorf("failed to recv %T: %w", msg, err)
}
size := int(flatbuffers.GetSizePrefix(c.data, 0))
if size > maxMessageSize {
- return nil, fmt.Errorf("message %T has too large size %v", msg, size)
+ return msg, fmt.Errorf("message %T has too large size %v", msg, size)
}
c.lastMsg = sizePrefixSize + size
if err := c.recv(c.lastMsg); err != nil {
- return nil, fmt.Errorf("failed to recv %T: %w", msg, err)
+ return msg, fmt.Errorf("failed to recv %T: %w", msg, err)
}
statRecv.Add(c.lastMsg)
+ // This probably can't be expressed w/o reflect as "new U" where U is *T,
+ // but I failed to express that as generic constraints.
+ msg = reflect.New(reflect.TypeOf(msg).Elem()).Interface().(T)
data := c.data[sizePrefixSize:c.lastMsg]
msg.Init(data, flatbuffers.GetUOffsetT(data))
return msg, nil
diff --git a/pkg/flatrpc/conn_test.go b/pkg/flatrpc/conn_test.go
index 38b9e6980..a6f7f23f9 100644
--- a/pkg/flatrpc/conn_test.go
+++ b/pkg/flatrpc/conn_test.go
@@ -4,7 +4,11 @@
package flatrpc
import (
+ "net"
+ "os"
+ "syscall"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
)
@@ -40,22 +44,22 @@ func TestConn(t *testing.T) {
}()
serv, err := ListenAndServe(":0", func(c *Conn) {
defer close(done)
- connectReqGot, err := Recv[ConnectRequestRaw](c)
+ connectReqGot, err := Recv[*ConnectRequestRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, connectReq, connectReqGot.UnPack())
+ assert.Equal(t, connectReq, connectReqGot)
if err := Send(c, connectReply); err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
- got, err := Recv[ExecutorMessageRaw](c)
+ got, err := Recv[*ExecutorMessageRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, executorMsg, got.UnPack())
+ assert.Equal(t, executorMsg, got)
}
})
if err != nil {
@@ -63,21 +67,18 @@ func TestConn(t *testing.T) {
}
defer serv.Close()
- c, err := Dial(serv.Addr.String(), 1)
- if err != nil {
- t.Fatal(err)
- }
+ c := dial(t, serv.Addr.String())
defer c.Close()
if err := Send(c, connectReq); err != nil {
t.Fatal(err)
}
- connectReplyGot, err := Recv[ConnectReplyRaw](c)
+ connectReplyGot, err := Recv[*ConnectReplyRaw](c)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, connectReply, connectReplyGot.UnPack())
+ assert.Equal(t, connectReply, connectReplyGot)
for i := 0; i < 10; i++ {
if err := Send(c, executorMsg); err != nil {
@@ -108,7 +109,7 @@ func BenchmarkConn(b *testing.B) {
serv, err := ListenAndServe(":0", func(c *Conn) {
defer close(done)
for i := 0; i < b.N; i++ {
- _, err := Recv[ConnectRequestRaw](c)
+ _, err := Recv[*ConnectRequestRaw](c)
if err != nil {
b.Fatal(err)
}
@@ -122,10 +123,7 @@ func BenchmarkConn(b *testing.B) {
}
defer serv.Close()
- c, err := Dial(serv.Addr.String(), 1)
- if err != nil {
- b.Fatal(err)
- }
+ c := dial(b, serv.Addr.String())
defer c.Close()
b.ReportAllocs()
@@ -134,9 +132,46 @@ func BenchmarkConn(b *testing.B) {
if err := Send(c, connectReq); err != nil {
b.Fatal(err)
}
- _, err := Recv[ConnectReplyRaw](c)
+ _, err := Recv[*ConnectReplyRaw](c)
if err != nil {
b.Fatal(err)
}
}
}
+
+func dial(t testing.TB, addr string) *Conn {
+ conn, err := net.DialTimeout("tcp", addr, time.Minute)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return NewConn(conn)
+}
+
+func FuzzRecv(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ data = data[:min(len(data), 1<<10)]
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ w := os.NewFile(uintptr(fds[0]), "")
+ r := os.NewFile(uintptr(fds[1]), "")
+ defer w.Close()
+ defer r.Close()
+ if _, err := w.Write(data); err != nil {
+ t.Fatal(err)
+ }
+ w.Close()
+ n, err := net.FileConn(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := NewConn(n)
+ for {
+ _, err := Recv[*ExecutorMessageRaw](c)
+ if err != nil {
+ break
+ }
+ }
+ })
+}
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 78adc8ec5..b51ea0c70 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -8,6 +8,7 @@ enum Feature : uint64 (bit_flags) {
Comparisons,
ExtraCoverage,
DelayKcovMmap,
+ SandboxNone,
SandboxSetuid,
SandboxNamespace,
SandboxAndroid,
@@ -35,8 +36,11 @@ table ConnectRequestRaw {
table ConnectReplyRaw {
debug :bool;
+ cover :bool;
procs :int32;
slowdown :int32;
+ syscall_timeout_ms :int32;
+ program_timeout_ms :int32;
leak_frames :[string];
race_frames :[string];
// Fuzzer sets up these features and returns results in InfoRequest.features.
@@ -79,7 +83,8 @@ table FeatureInfoRaw {
union HostMessagesRaw {
ExecRequest :ExecRequestRaw,
SignalUpdate :SignalUpdateRaw,
- StartLeakChecks :StartLeakChecksRaw
+ StartLeakChecks :StartLeakChecksRaw,
+ StateRequest :StateRequestRaw,
}
table HostMessageRaw {
@@ -90,6 +95,7 @@ table HostMessageRaw {
union ExecutorMessagesRaw {
ExecResult :ExecResultRaw,
Executing :ExecutingMessageRaw,
+ State :StateResultRaw,
}
table ExecutorMessageRaw {
@@ -100,8 +106,6 @@ enum RequestFlag : uint64 (bit_flags) {
// If set, prog_data contains compiled executable binary
// that needs to be written to disk and executed.
IsBinary,
- // If set, fully reset executor state befor executing the test.
- ResetState,
// If set, collect program output and return in output field.
ReturnOutput,
// If set, don't fail on program failures, instead return the error in error field.
@@ -112,6 +116,8 @@ enum RequestFlag : uint64 (bit_flags) {
enum ExecEnv : uint64 (bit_flags) {
Debug, // debug output from executor
Signal, // collect feedback signals (coverage)
+ ResetState, // fully reset executor state befor executing the test
+ SandboxNone, // minimal sandboxing
SandboxSetuid, // impersonate nobody user
SandboxNamespace, // use namespaces for sandboxing
SandboxAndroid, // use Android sandboxing for the untrusted_app domain
@@ -150,12 +156,8 @@ table ExecRequestRaw {
prog_data :[uint8];
exec_opts :ExecOptsRaw;
flags :RequestFlag;
- signal_filter :[uint64];
- signal_filter_call :int32;
// Return all signal for these calls.
all_signal :[int32];
- // Repeat the program that many times (0 means 1).
- repeat :int32;
}
table SignalUpdateRaw {
@@ -169,6 +171,9 @@ table SignalUpdateRaw {
table StartLeakChecksRaw {
}
+table StateRequestRaw {
+}
+
// Notification from the executor that it started executing the program 'id'.
// We want this request to be as small and as fast as possible b/c we need it
// to reach the host (or at least leave the VM) before the VM crashes
@@ -209,6 +214,8 @@ struct ComparisonRaw {
table ProgInfoRaw {
calls :[CallInfoRaw];
// Contains signal and cover collected from background threads.
+ // The raw version is exported by executor, and them merged into extra on the host.
+ extra_raw :[CallInfoRaw];
extra :CallInfoRaw;
// Total execution time of the program in nanoseconds.
elapsed :uint64;
@@ -223,3 +230,7 @@ table ExecResultRaw {
error :string;
info :ProgInfoRaw;
}
+
+table StateResultRaw {
+ data :[uint8];
+}
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index b561334fe..28c28ca8e 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -15,22 +15,23 @@ const (
FeatureComparisons Feature = 2
FeatureExtraCoverage Feature = 4
FeatureDelayKcovMmap Feature = 8
- FeatureSandboxSetuid Feature = 16
- FeatureSandboxNamespace Feature = 32
- FeatureSandboxAndroid Feature = 64
- FeatureFault Feature = 128
- FeatureLeak Feature = 256
- FeatureNetInjection Feature = 512
- FeatureNetDevices Feature = 1024
- FeatureKCSAN Feature = 2048
- FeatureDevlinkPCI Feature = 4096
- FeatureNicVF Feature = 8192
- FeatureUSBEmulation Feature = 16384
- FeatureVhciInjection Feature = 32768
- FeatureWifiEmulation Feature = 65536
- FeatureLRWPANEmulation Feature = 131072
- FeatureBinFmtMisc Feature = 262144
- FeatureSwap Feature = 524288
+ FeatureSandboxNone Feature = 16
+ FeatureSandboxSetuid Feature = 32
+ FeatureSandboxNamespace Feature = 64
+ FeatureSandboxAndroid Feature = 128
+ FeatureFault Feature = 256
+ FeatureLeak Feature = 512
+ FeatureNetInjection Feature = 1024
+ FeatureNetDevices Feature = 2048
+ FeatureKCSAN Feature = 4096
+ FeatureDevlinkPCI Feature = 8192
+ FeatureNicVF Feature = 16384
+ FeatureUSBEmulation Feature = 32768
+ FeatureVhciInjection Feature = 65536
+ FeatureWifiEmulation Feature = 131072
+ FeatureLRWPANEmulation Feature = 262144
+ FeatureBinFmtMisc Feature = 524288
+ FeatureSwap Feature = 1048576
)
var EnumNamesFeature = map[Feature]string{
@@ -38,6 +39,7 @@ var EnumNamesFeature = map[Feature]string{
FeatureComparisons: "Comparisons",
FeatureExtraCoverage: "ExtraCoverage",
FeatureDelayKcovMmap: "DelayKcovMmap",
+ FeatureSandboxNone: "SandboxNone",
FeatureSandboxSetuid: "SandboxSetuid",
FeatureSandboxNamespace: "SandboxNamespace",
FeatureSandboxAndroid: "SandboxAndroid",
@@ -61,6 +63,7 @@ var EnumValuesFeature = map[string]Feature{
"Comparisons": FeatureComparisons,
"ExtraCoverage": FeatureExtraCoverage,
"DelayKcovMmap": FeatureDelayKcovMmap,
+ "SandboxNone": FeatureSandboxNone,
"SandboxSetuid": FeatureSandboxSetuid,
"SandboxNamespace": FeatureSandboxNamespace,
"SandboxAndroid": FeatureSandboxAndroid,
@@ -93,6 +96,7 @@ const (
HostMessagesRawExecRequest HostMessagesRaw = 1
HostMessagesRawSignalUpdate HostMessagesRaw = 2
HostMessagesRawStartLeakChecks HostMessagesRaw = 3
+ HostMessagesRawStateRequest HostMessagesRaw = 4
)
var EnumNamesHostMessagesRaw = map[HostMessagesRaw]string{
@@ -100,6 +104,7 @@ var EnumNamesHostMessagesRaw = map[HostMessagesRaw]string{
HostMessagesRawExecRequest: "ExecRequest",
HostMessagesRawSignalUpdate: "SignalUpdate",
HostMessagesRawStartLeakChecks: "StartLeakChecks",
+ HostMessagesRawStateRequest: "StateRequest",
}
var EnumValuesHostMessagesRaw = map[string]HostMessagesRaw{
@@ -107,6 +112,7 @@ var EnumValuesHostMessagesRaw = map[string]HostMessagesRaw{
"ExecRequest": HostMessagesRawExecRequest,
"SignalUpdate": HostMessagesRawSignalUpdate,
"StartLeakChecks": HostMessagesRawStartLeakChecks,
+ "StateRequest": HostMessagesRawStateRequest,
}
func (v HostMessagesRaw) String() string {
@@ -132,6 +138,8 @@ func (t *HostMessagesRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
return t.Value.(*SignalUpdateRawT).Pack(builder)
case HostMessagesRawStartLeakChecks:
return t.Value.(*StartLeakChecksRawT).Pack(builder)
+ case HostMessagesRawStateRequest:
+ return t.Value.(*StateRequestRawT).Pack(builder)
}
return 0
}
@@ -147,6 +155,9 @@ func (rcv HostMessagesRaw) UnPack(table flatbuffers.Table) *HostMessagesRawT {
case HostMessagesRawStartLeakChecks:
x := StartLeakChecksRaw{_tab: table}
return &HostMessagesRawT{Type: HostMessagesRawStartLeakChecks, Value: x.UnPack()}
+ case HostMessagesRawStateRequest:
+ x := StateRequestRaw{_tab: table}
+ return &HostMessagesRawT{Type: HostMessagesRawStateRequest, Value: x.UnPack()}
}
return nil
}
@@ -157,18 +168,21 @@ const (
ExecutorMessagesRawNONE ExecutorMessagesRaw = 0
ExecutorMessagesRawExecResult ExecutorMessagesRaw = 1
ExecutorMessagesRawExecuting ExecutorMessagesRaw = 2
+ ExecutorMessagesRawState ExecutorMessagesRaw = 3
)
var EnumNamesExecutorMessagesRaw = map[ExecutorMessagesRaw]string{
ExecutorMessagesRawNONE: "NONE",
ExecutorMessagesRawExecResult: "ExecResult",
ExecutorMessagesRawExecuting: "Executing",
+ ExecutorMessagesRawState: "State",
}
var EnumValuesExecutorMessagesRaw = map[string]ExecutorMessagesRaw{
"NONE": ExecutorMessagesRawNONE,
"ExecResult": ExecutorMessagesRawExecResult,
"Executing": ExecutorMessagesRawExecuting,
+ "State": ExecutorMessagesRawState,
}
func (v ExecutorMessagesRaw) String() string {
@@ -192,6 +206,8 @@ func (t *ExecutorMessagesRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UO
return t.Value.(*ExecResultRawT).Pack(builder)
case ExecutorMessagesRawExecuting:
return t.Value.(*ExecutingMessageRawT).Pack(builder)
+ case ExecutorMessagesRawState:
+ return t.Value.(*StateResultRawT).Pack(builder)
}
return 0
}
@@ -204,6 +220,9 @@ func (rcv ExecutorMessagesRaw) UnPack(table flatbuffers.Table) *ExecutorMessages
case ExecutorMessagesRawExecuting:
x := ExecutingMessageRaw{_tab: table}
return &ExecutorMessagesRawT{Type: ExecutorMessagesRawExecuting, Value: x.UnPack()}
+ case ExecutorMessagesRawState:
+ x := StateResultRaw{_tab: table}
+ return &ExecutorMessagesRawT{Type: ExecutorMessagesRawState, Value: x.UnPack()}
}
return nil
}
@@ -212,21 +231,18 @@ type RequestFlag uint64
const (
RequestFlagIsBinary RequestFlag = 1
- RequestFlagResetState RequestFlag = 2
- RequestFlagReturnOutput RequestFlag = 4
- RequestFlagReturnError RequestFlag = 8
+ RequestFlagReturnOutput RequestFlag = 2
+ RequestFlagReturnError RequestFlag = 4
)
var EnumNamesRequestFlag = map[RequestFlag]string{
RequestFlagIsBinary: "IsBinary",
- RequestFlagResetState: "ResetState",
RequestFlagReturnOutput: "ReturnOutput",
RequestFlagReturnError: "ReturnError",
}
var EnumValuesRequestFlag = map[string]RequestFlag{
"IsBinary": RequestFlagIsBinary,
- "ResetState": RequestFlagResetState,
"ReturnOutput": RequestFlagReturnOutput,
"ReturnError": RequestFlagReturnError,
}
@@ -243,25 +259,29 @@ type ExecEnv uint64
const (
ExecEnvDebug ExecEnv = 1
ExecEnvSignal ExecEnv = 2
- ExecEnvSandboxSetuid ExecEnv = 4
- ExecEnvSandboxNamespace ExecEnv = 8
- ExecEnvSandboxAndroid ExecEnv = 16
- ExecEnvExtraCover ExecEnv = 32
- ExecEnvEnableTun ExecEnv = 64
- ExecEnvEnableNetDev ExecEnv = 128
- ExecEnvEnableNetReset ExecEnv = 256
- ExecEnvEnableCgroups ExecEnv = 512
- ExecEnvEnableCloseFds ExecEnv = 1024
- ExecEnvEnableDevlinkPCI ExecEnv = 2048
- ExecEnvEnableVhciInjection ExecEnv = 4096
- ExecEnvEnableWifi ExecEnv = 8192
- ExecEnvDelayKcovMmap ExecEnv = 16384
- ExecEnvEnableNicVF ExecEnv = 32768
+ ExecEnvResetState ExecEnv = 4
+ ExecEnvSandboxNone ExecEnv = 8
+ ExecEnvSandboxSetuid ExecEnv = 16
+ ExecEnvSandboxNamespace ExecEnv = 32
+ ExecEnvSandboxAndroid ExecEnv = 64
+ ExecEnvExtraCover ExecEnv = 128
+ ExecEnvEnableTun ExecEnv = 256
+ ExecEnvEnableNetDev ExecEnv = 512
+ ExecEnvEnableNetReset ExecEnv = 1024
+ ExecEnvEnableCgroups ExecEnv = 2048
+ ExecEnvEnableCloseFds ExecEnv = 4096
+ ExecEnvEnableDevlinkPCI ExecEnv = 8192
+ ExecEnvEnableVhciInjection ExecEnv = 16384
+ ExecEnvEnableWifi ExecEnv = 32768
+ ExecEnvDelayKcovMmap ExecEnv = 65536
+ ExecEnvEnableNicVF ExecEnv = 131072
)
var EnumNamesExecEnv = map[ExecEnv]string{
ExecEnvDebug: "Debug",
ExecEnvSignal: "Signal",
+ ExecEnvResetState: "ResetState",
+ ExecEnvSandboxNone: "SandboxNone",
ExecEnvSandboxSetuid: "SandboxSetuid",
ExecEnvSandboxNamespace: "SandboxNamespace",
ExecEnvSandboxAndroid: "SandboxAndroid",
@@ -281,6 +301,8 @@ var EnumNamesExecEnv = map[ExecEnv]string{
var EnumValuesExecEnv = map[string]ExecEnv{
"Debug": ExecEnvDebug,
"Signal": ExecEnvSignal,
+ "ResetState": ExecEnvResetState,
+ "SandboxNone": ExecEnvSandboxNone,
"SandboxSetuid": ExecEnvSandboxSetuid,
"SandboxNamespace": ExecEnvSandboxNamespace,
"SandboxAndroid": ExecEnvSandboxAndroid,
@@ -485,14 +507,17 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
}
type ConnectReplyRawT struct {
- Debug bool `json:"debug"`
- Procs int32 `json:"procs"`
- Slowdown int32 `json:"slowdown"`
- LeakFrames []string `json:"leak_frames"`
- RaceFrames []string `json:"race_frames"`
- Features Feature `json:"features"`
- Files []string `json:"files"`
- Globs []string `json:"globs"`
+ Debug bool `json:"debug"`
+ Cover bool `json:"cover"`
+ Procs int32 `json:"procs"`
+ Slowdown int32 `json:"slowdown"`
+ SyscallTimeoutMs int32 `json:"syscall_timeout_ms"`
+ ProgramTimeoutMs int32 `json:"program_timeout_ms"`
+ LeakFrames []string `json:"leak_frames"`
+ RaceFrames []string `json:"race_frames"`
+ Features Feature `json:"features"`
+ Files []string `json:"files"`
+ Globs []string `json:"globs"`
}
func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -553,8 +578,11 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
}
ConnectReplyRawStart(builder)
ConnectReplyRawAddDebug(builder, t.Debug)
+ ConnectReplyRawAddCover(builder, t.Cover)
ConnectReplyRawAddProcs(builder, t.Procs)
ConnectReplyRawAddSlowdown(builder, t.Slowdown)
+ ConnectReplyRawAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs)
+ ConnectReplyRawAddProgramTimeoutMs(builder, t.ProgramTimeoutMs)
ConnectReplyRawAddLeakFrames(builder, leakFramesOffset)
ConnectReplyRawAddRaceFrames(builder, raceFramesOffset)
ConnectReplyRawAddFeatures(builder, t.Features)
@@ -565,8 +593,11 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) {
t.Debug = rcv.Debug()
+ t.Cover = rcv.Cover()
t.Procs = rcv.Procs()
t.Slowdown = rcv.Slowdown()
+ t.SyscallTimeoutMs = rcv.SyscallTimeoutMs()
+ t.ProgramTimeoutMs = rcv.ProgramTimeoutMs()
leakFramesLength := rcv.LeakFramesLength()
t.LeakFrames = make([]string, leakFramesLength)
for j := 0; j < leakFramesLength; j++ {
@@ -638,20 +669,32 @@ func (rcv *ConnectReplyRaw) MutateDebug(n bool) bool {
return rcv._tab.MutateBoolSlot(4, n)
}
-func (rcv *ConnectReplyRaw) Procs() int32 {
+func (rcv *ConnectReplyRaw) Cover() bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *ConnectReplyRaw) MutateCover(n bool) bool {
+ return rcv._tab.MutateBoolSlot(6, n)
+}
+
+func (rcv *ConnectReplyRaw) Procs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
return 0
}
func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool {
- return rcv._tab.MutateInt32Slot(6, n)
+ return rcv._tab.MutateInt32Slot(8, n)
}
func (rcv *ConnectReplyRaw) Slowdown() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.GetInt32(o + rcv._tab.Pos)
}
@@ -659,11 +702,35 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 {
}
func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool {
- return rcv._tab.MutateInt32Slot(8, n)
+ return rcv._tab.MutateInt32Slot(10, n)
+}
+
+func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(12, n)
+}
+
+func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool {
+ return rcv._tab.MutateInt32Slot(14, n)
}
func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -672,7 +739,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) LeakFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -680,7 +747,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int {
}
func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -689,7 +756,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte {
}
func (rcv *ConnectReplyRaw) RaceFramesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -697,7 +764,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int {
}
func (rcv *ConnectReplyRaw) Features() Feature {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
if o != 0 {
return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos))
}
@@ -705,11 +772,11 @@ func (rcv *ConnectReplyRaw) Features() Feature {
}
func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool {
- return rcv._tab.MutateUint64Slot(14, uint64(n))
+ return rcv._tab.MutateUint64Slot(20, uint64(n))
}
func (rcv *ConnectReplyRaw) Files(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -718,7 +785,7 @@ func (rcv *ConnectReplyRaw) Files(j int) []byte {
}
func (rcv *ConnectReplyRaw) FilesLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -726,7 +793,7 @@ func (rcv *ConnectReplyRaw) FilesLength() int {
}
func (rcv *ConnectReplyRaw) Globs(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
@@ -735,7 +802,7 @@ func (rcv *ConnectReplyRaw) Globs(j int) []byte {
}
func (rcv *ConnectReplyRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(24))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -743,40 +810,49 @@ func (rcv *ConnectReplyRaw) GlobsLength() int {
}
func ConnectReplyRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(8)
+ builder.StartObject(11)
}
func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
builder.PrependBoolSlot(0, debug, false)
}
+func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) {
+ builder.PrependBoolSlot(1, cover, false)
+}
func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) {
- builder.PrependInt32Slot(1, procs, 0)
+ builder.PrependInt32Slot(2, procs, 0)
}
func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) {
- builder.PrependInt32Slot(2, slowdown, 0)
+ builder.PrependInt32Slot(3, slowdown, 0)
+}
+func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) {
+ builder.PrependInt32Slot(4, syscallTimeoutMs, 0)
+}
+func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) {
+ builder.PrependInt32Slot(5, programTimeoutMs, 0)
}
func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(leakFrames), 0)
+ builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(leakFrames), 0)
}
func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(raceFrames), 0)
+ builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(raceFrames), 0)
}
func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) {
- builder.PrependUint64Slot(5, uint64(features), 0)
+ builder.PrependUint64Slot(8, uint64(features), 0)
}
func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(files), 0)
+ builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(files), 0)
}
func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(globs), 0)
+ builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(globs), 0)
}
func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
@@ -1741,14 +1817,11 @@ func CreateExecOptsRaw(builder *flatbuffers.Builder, envFlags ExecEnv, execFlags
}
type ExecRequestRawT struct {
- Id int64 `json:"id"`
- ProgData []byte `json:"prog_data"`
- ExecOpts *ExecOptsRawT `json:"exec_opts"`
- Flags RequestFlag `json:"flags"`
- SignalFilter []uint64 `json:"signal_filter"`
- SignalFilterCall int32 `json:"signal_filter_call"`
- AllSignal []int32 `json:"all_signal"`
- Repeat int32 `json:"repeat"`
+ Id int64 `json:"id"`
+ ProgData []byte `json:"prog_data"`
+ ExecOpts *ExecOptsRawT `json:"exec_opts"`
+ Flags RequestFlag `json:"flags"`
+ AllSignal []int32 `json:"all_signal"`
}
func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -1759,15 +1832,6 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
if t.ProgData != nil {
progDataOffset = builder.CreateByteString(t.ProgData)
}
- signalFilterOffset := flatbuffers.UOffsetT(0)
- if t.SignalFilter != nil {
- signalFilterLength := len(t.SignalFilter)
- ExecRequestRawStartSignalFilterVector(builder, signalFilterLength)
- for j := signalFilterLength - 1; j >= 0; j-- {
- builder.PrependUint64(t.SignalFilter[j])
- }
- signalFilterOffset = builder.EndVector(signalFilterLength)
- }
allSignalOffset := flatbuffers.UOffsetT(0)
if t.AllSignal != nil {
allSignalLength := len(t.AllSignal)
@@ -1783,10 +1847,7 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
execOptsOffset := t.ExecOpts.Pack(builder)
ExecRequestRawAddExecOpts(builder, execOptsOffset)
ExecRequestRawAddFlags(builder, t.Flags)
- ExecRequestRawAddSignalFilter(builder, signalFilterOffset)
- ExecRequestRawAddSignalFilterCall(builder, t.SignalFilterCall)
ExecRequestRawAddAllSignal(builder, allSignalOffset)
- ExecRequestRawAddRepeat(builder, t.Repeat)
return ExecRequestRawEnd(builder)
}
@@ -1795,18 +1856,11 @@ func (rcv *ExecRequestRaw) UnPackTo(t *ExecRequestRawT) {
t.ProgData = rcv.ProgDataBytes()
t.ExecOpts = rcv.ExecOpts(nil).UnPack()
t.Flags = rcv.Flags()
- signalFilterLength := rcv.SignalFilterLength()
- t.SignalFilter = make([]uint64, signalFilterLength)
- for j := 0; j < signalFilterLength; j++ {
- t.SignalFilter[j] = rcv.SignalFilter(j)
- }
- t.SignalFilterCall = rcv.SignalFilterCall()
allSignalLength := rcv.AllSignalLength()
t.AllSignal = make([]int32, allSignalLength)
for j := 0; j < allSignalLength; j++ {
t.AllSignal[j] = rcv.AllSignal(j)
}
- t.Repeat = rcv.Repeat()
}
func (rcv *ExecRequestRaw) UnPack() *ExecRequestRawT {
@@ -1916,46 +1970,8 @@ func (rcv *ExecRequestRaw) MutateFlags(n RequestFlag) bool {
return rcv._tab.MutateUint64Slot(10, uint64(n))
}
-func (rcv *ExecRequestRaw) SignalFilter(j int) uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8))
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) SignalFilterLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- return rcv._tab.VectorLen(o)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateSignalFilter(j int, n uint64) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n)
- }
- return false
-}
-
-func (rcv *ExecRequestRaw) SignalFilterCall() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
- if o != 0 {
- return rcv._tab.GetInt32(o + rcv._tab.Pos)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateSignalFilterCall(n int32) bool {
- return rcv._tab.MutateInt32Slot(14, n)
-}
-
func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4))
@@ -1964,7 +1980,7 @@ func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
}
func (rcv *ExecRequestRaw) AllSignalLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -1972,7 +1988,7 @@ func (rcv *ExecRequestRaw) AllSignalLength() int {
}
func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n)
@@ -1980,20 +1996,8 @@ func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
return false
}
-func (rcv *ExecRequestRaw) Repeat() int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
- if o != 0 {
- return rcv._tab.GetInt32(o + rcv._tab.Pos)
- }
- return 0
-}
-
-func (rcv *ExecRequestRaw) MutateRepeat(n int32) bool {
- return rcv._tab.MutateInt32Slot(18, n)
-}
-
func ExecRequestRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(8)
+ builder.StartObject(5)
}
func ExecRequestRawAddId(builder *flatbuffers.Builder, id int64) {
builder.PrependInt64Slot(0, id, 0)
@@ -2010,24 +2014,12 @@ func ExecRequestRawAddExecOpts(builder *flatbuffers.Builder, execOpts flatbuffer
func ExecRequestRawAddFlags(builder *flatbuffers.Builder, flags RequestFlag) {
builder.PrependUint64Slot(3, uint64(flags), 0)
}
-func ExecRequestRawAddSignalFilter(builder *flatbuffers.Builder, signalFilter flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(signalFilter), 0)
-}
-func ExecRequestRawStartSignalFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(8, numElems, 8)
-}
-func ExecRequestRawAddSignalFilterCall(builder *flatbuffers.Builder, signalFilterCall int32) {
- builder.PrependInt32Slot(5, signalFilterCall, 0)
-}
func ExecRequestRawAddAllSignal(builder *flatbuffers.Builder, allSignal flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(allSignal), 0)
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(allSignal), 0)
}
func ExecRequestRawStartAllSignalVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
-func ExecRequestRawAddRepeat(builder *flatbuffers.Builder, repeat int32) {
- builder.PrependInt32Slot(7, repeat, 0)
-}
func ExecRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
@@ -2242,6 +2234,63 @@ func StartLeakChecksRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
+type StateRequestRawT struct {
+}
+
+func (t *StateRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ StateRequestRawStart(builder)
+ return StateRequestRawEnd(builder)
+}
+
+func (rcv *StateRequestRaw) UnPackTo(t *StateRequestRawT) {
+}
+
+func (rcv *StateRequestRaw) UnPack() *StateRequestRawT {
+ if rcv == nil {
+ return nil
+ }
+ t := &StateRequestRawT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type StateRequestRaw struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsStateRequestRaw(buf []byte, offset flatbuffers.UOffsetT) *StateRequestRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &StateRequestRaw{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsStateRequestRaw(buf []byte, offset flatbuffers.UOffsetT) *StateRequestRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &StateRequestRaw{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *StateRequestRaw) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *StateRequestRaw) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func StateRequestRawStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func StateRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
+
type ExecutingMessageRawT struct {
Id int64 `json:"id"`
ProcId int32 `json:"proc_id"`
@@ -2664,6 +2713,7 @@ func CreateComparisonRaw(builder *flatbuffers.Builder, op1 uint64, op2 uint64) f
type ProgInfoRawT struct {
Calls []*CallInfoRawT `json:"calls"`
+ ExtraRaw []*CallInfoRawT `json:"extra_raw"`
Extra *CallInfoRawT `json:"extra"`
Elapsed uint64 `json:"elapsed"`
Freshness uint64 `json:"freshness"`
@@ -2686,9 +2736,23 @@ func (t *ProgInfoRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
}
callsOffset = builder.EndVector(callsLength)
}
+ extraRawOffset := flatbuffers.UOffsetT(0)
+ if t.ExtraRaw != nil {
+ extraRawLength := len(t.ExtraRaw)
+ extraRawOffsets := make([]flatbuffers.UOffsetT, extraRawLength)
+ for j := 0; j < extraRawLength; j++ {
+ extraRawOffsets[j] = t.ExtraRaw[j].Pack(builder)
+ }
+ ProgInfoRawStartExtraRawVector(builder, extraRawLength)
+ for j := extraRawLength - 1; j >= 0; j-- {
+ builder.PrependUOffsetT(extraRawOffsets[j])
+ }
+ extraRawOffset = builder.EndVector(extraRawLength)
+ }
extraOffset := t.Extra.Pack(builder)
ProgInfoRawStart(builder)
ProgInfoRawAddCalls(builder, callsOffset)
+ ProgInfoRawAddExtraRaw(builder, extraRawOffset)
ProgInfoRawAddExtra(builder, extraOffset)
ProgInfoRawAddElapsed(builder, t.Elapsed)
ProgInfoRawAddFreshness(builder, t.Freshness)
@@ -2703,6 +2767,13 @@ func (rcv *ProgInfoRaw) UnPackTo(t *ProgInfoRawT) {
rcv.Calls(&x, j)
t.Calls[j] = x.UnPack()
}
+ extraRawLength := rcv.ExtraRawLength()
+ t.ExtraRaw = make([]*CallInfoRawT, extraRawLength)
+ for j := 0; j < extraRawLength; j++ {
+ x := CallInfoRaw{}
+ rcv.ExtraRaw(&x, j)
+ t.ExtraRaw[j] = x.UnPack()
+ }
t.Extra = rcv.Extra(nil).UnPack()
t.Elapsed = rcv.Elapsed()
t.Freshness = rcv.Freshness()
@@ -2764,9 +2835,29 @@ func (rcv *ProgInfoRaw) CallsLength() int {
return 0
}
-func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
+func (rcv *ProgInfoRaw) ExtraRaw(obj *CallInfoRaw, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *ProgInfoRaw) ExtraRawLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(CallInfoRaw)
@@ -2778,7 +2869,7 @@ func (rcv *ProgInfoRaw) Extra(obj *CallInfoRaw) *CallInfoRaw {
}
func (rcv *ProgInfoRaw) Elapsed() uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.GetUint64(o + rcv._tab.Pos)
}
@@ -2786,11 +2877,11 @@ func (rcv *ProgInfoRaw) Elapsed() uint64 {
}
func (rcv *ProgInfoRaw) MutateElapsed(n uint64) bool {
- return rcv._tab.MutateUint64Slot(8, n)
+ return rcv._tab.MutateUint64Slot(10, n)
}
func (rcv *ProgInfoRaw) Freshness() uint64 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
return rcv._tab.GetUint64(o + rcv._tab.Pos)
}
@@ -2798,11 +2889,11 @@ func (rcv *ProgInfoRaw) Freshness() uint64 {
}
func (rcv *ProgInfoRaw) MutateFreshness(n uint64) bool {
- return rcv._tab.MutateUint64Slot(10, n)
+ return rcv._tab.MutateUint64Slot(12, n)
}
func ProgInfoRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(4)
+ builder.StartObject(5)
}
func ProgInfoRawAddCalls(builder *flatbuffers.Builder, calls flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(calls), 0)
@@ -2810,14 +2901,20 @@ func ProgInfoRawAddCalls(builder *flatbuffers.Builder, calls flatbuffers.UOffset
func ProgInfoRawStartCallsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
+func ProgInfoRawAddExtraRaw(builder *flatbuffers.Builder, extraRaw flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(extraRaw), 0)
+}
+func ProgInfoRawStartExtraRawVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
func ProgInfoRawAddExtra(builder *flatbuffers.Builder, extra flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(extra), 0)
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(extra), 0)
}
func ProgInfoRawAddElapsed(builder *flatbuffers.Builder, elapsed uint64) {
- builder.PrependUint64Slot(2, elapsed, 0)
+ builder.PrependUint64Slot(3, elapsed, 0)
}
func ProgInfoRawAddFreshness(builder *flatbuffers.Builder, freshness uint64) {
- builder.PrependUint64Slot(3, freshness, 0)
+ builder.PrependUint64Slot(4, freshness, 0)
}
func ProgInfoRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
@@ -2979,3 +3076,107 @@ func ExecResultRawAddInfo(builder *flatbuffers.Builder, info flatbuffers.UOffset
func ExecResultRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
+
+type StateResultRawT struct {
+ Data []byte `json:"data"`
+}
+
+func (t *StateResultRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ if t == nil {
+ return 0
+ }
+ dataOffset := flatbuffers.UOffsetT(0)
+ if t.Data != nil {
+ dataOffset = builder.CreateByteString(t.Data)
+ }
+ StateResultRawStart(builder)
+ StateResultRawAddData(builder, dataOffset)
+ return StateResultRawEnd(builder)
+}
+
+func (rcv *StateResultRaw) UnPackTo(t *StateResultRawT) {
+ t.Data = rcv.DataBytes()
+}
+
+func (rcv *StateResultRaw) UnPack() *StateResultRawT {
+ if rcv == nil {
+ return nil
+ }
+ t := &StateResultRawT{}
+ rcv.UnPackTo(t)
+ return t
+}
+
+type StateResultRaw struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsStateResultRaw(buf []byte, offset flatbuffers.UOffsetT) *StateResultRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &StateResultRaw{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func GetSizePrefixedRootAsStateResultRaw(buf []byte, offset flatbuffers.UOffsetT) *StateResultRaw {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &StateResultRaw{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func (rcv *StateResultRaw) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *StateResultRaw) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *StateResultRaw) Data(j int) byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
+ }
+ return 0
+}
+
+func (rcv *StateResultRaw) DataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *StateResultRaw) DataBytes() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func (rcv *StateResultRaw) MutateData(j int, n byte) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
+ }
+ return false
+}
+
+func StateResultRawStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func StateResultRawAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(data), 0)
+}
+func StateResultRawStartDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(1, numElems, 1)
+}
+func StateResultRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index d430e48f2..6f448c410 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -65,6 +65,10 @@ struct StartLeakChecksRaw;
struct StartLeakChecksRawBuilder;
struct StartLeakChecksRawT;
+struct StateRequestRaw;
+struct StateRequestRawBuilder;
+struct StateRequestRawT;
+
struct ExecutingMessageRaw;
struct ExecutingMessageRawBuilder;
struct ExecutingMessageRawT;
@@ -83,38 +87,44 @@ struct ExecResultRaw;
struct ExecResultRawBuilder;
struct ExecResultRawT;
+struct StateResultRaw;
+struct StateResultRawBuilder;
+struct StateResultRawT;
+
enum class Feature : uint64_t {
Coverage = 1ULL,
Comparisons = 2ULL,
ExtraCoverage = 4ULL,
DelayKcovMmap = 8ULL,
- SandboxSetuid = 16ULL,
- SandboxNamespace = 32ULL,
- SandboxAndroid = 64ULL,
- Fault = 128ULL,
- Leak = 256ULL,
- NetInjection = 512ULL,
- NetDevices = 1024ULL,
- KCSAN = 2048ULL,
- DevlinkPCI = 4096ULL,
- NicVF = 8192ULL,
- USBEmulation = 16384ULL,
- VhciInjection = 32768ULL,
- WifiEmulation = 65536ULL,
- LRWPANEmulation = 131072ULL,
- BinFmtMisc = 262144ULL,
- Swap = 524288ULL,
+ SandboxNone = 16ULL,
+ SandboxSetuid = 32ULL,
+ SandboxNamespace = 64ULL,
+ SandboxAndroid = 128ULL,
+ Fault = 256ULL,
+ Leak = 512ULL,
+ NetInjection = 1024ULL,
+ NetDevices = 2048ULL,
+ KCSAN = 4096ULL,
+ DevlinkPCI = 8192ULL,
+ NicVF = 16384ULL,
+ USBEmulation = 32768ULL,
+ VhciInjection = 65536ULL,
+ WifiEmulation = 131072ULL,
+ LRWPANEmulation = 262144ULL,
+ BinFmtMisc = 524288ULL,
+ Swap = 1048576ULL,
NONE = 0,
- ANY = 1048575ULL
+ ANY = 2097151ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(Feature, uint64_t)
-inline const Feature (&EnumValuesFeature())[20] {
+inline const Feature (&EnumValuesFeature())[21] {
static const Feature values[] = {
Feature::Coverage,
Feature::Comparisons,
Feature::ExtraCoverage,
Feature::DelayKcovMmap,
+ Feature::SandboxNone,
Feature::SandboxSetuid,
Feature::SandboxNamespace,
Feature::SandboxAndroid,
@@ -141,6 +151,7 @@ inline const char *EnumNameFeature(Feature e) {
case Feature::Comparisons: return "Comparisons";
case Feature::ExtraCoverage: return "ExtraCoverage";
case Feature::DelayKcovMmap: return "DelayKcovMmap";
+ case Feature::SandboxNone: return "SandboxNone";
case Feature::SandboxSetuid: return "SandboxSetuid";
case Feature::SandboxNamespace: return "SandboxNamespace";
case Feature::SandboxAndroid: return "SandboxAndroid";
@@ -166,33 +177,36 @@ enum class HostMessagesRaw : uint8_t {
ExecRequest = 1,
SignalUpdate = 2,
StartLeakChecks = 3,
+ StateRequest = 4,
MIN = NONE,
- MAX = StartLeakChecks
+ MAX = StateRequest
};
-inline const HostMessagesRaw (&EnumValuesHostMessagesRaw())[4] {
+inline const HostMessagesRaw (&EnumValuesHostMessagesRaw())[5] {
static const HostMessagesRaw values[] = {
HostMessagesRaw::NONE,
HostMessagesRaw::ExecRequest,
HostMessagesRaw::SignalUpdate,
- HostMessagesRaw::StartLeakChecks
+ HostMessagesRaw::StartLeakChecks,
+ HostMessagesRaw::StateRequest
};
return values;
}
inline const char * const *EnumNamesHostMessagesRaw() {
- static const char * const names[5] = {
+ static const char * const names[6] = {
"NONE",
"ExecRequest",
"SignalUpdate",
"StartLeakChecks",
+ "StateRequest",
nullptr
};
return names;
}
inline const char *EnumNameHostMessagesRaw(HostMessagesRaw e) {
- if (flatbuffers::IsOutRange(e, HostMessagesRaw::NONE, HostMessagesRaw::StartLeakChecks)) return "";
+ if (flatbuffers::IsOutRange(e, HostMessagesRaw::NONE, HostMessagesRaw::StateRequest)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesHostMessagesRaw()[index];
}
@@ -213,6 +227,10 @@ template<> struct HostMessagesRawTraits<rpc::StartLeakChecksRaw> {
static const HostMessagesRaw enum_value = HostMessagesRaw::StartLeakChecks;
};
+template<> struct HostMessagesRawTraits<rpc::StateRequestRaw> {
+ static const HostMessagesRaw enum_value = HostMessagesRaw::StateRequest;
+};
+
template<typename T> struct HostMessagesRawUnionTraits {
static const HostMessagesRaw enum_value = HostMessagesRaw::NONE;
};
@@ -229,6 +247,10 @@ template<> struct HostMessagesRawUnionTraits<rpc::StartLeakChecksRawT> {
static const HostMessagesRaw enum_value = HostMessagesRaw::StartLeakChecks;
};
+template<> struct HostMessagesRawUnionTraits<rpc::StateRequestRawT> {
+ static const HostMessagesRaw enum_value = HostMessagesRaw::StateRequest;
+};
+
struct HostMessagesRawUnion {
HostMessagesRaw type;
void *value;
@@ -283,6 +305,14 @@ struct HostMessagesRawUnion {
return type == HostMessagesRaw::StartLeakChecks ?
reinterpret_cast<const rpc::StartLeakChecksRawT *>(value) : nullptr;
}
+ rpc::StateRequestRawT *AsStateRequest() {
+ return type == HostMessagesRaw::StateRequest ?
+ reinterpret_cast<rpc::StateRequestRawT *>(value) : nullptr;
+ }
+ const rpc::StateRequestRawT *AsStateRequest() const {
+ return type == HostMessagesRaw::StateRequest ?
+ reinterpret_cast<const rpc::StateRequestRawT *>(value) : nullptr;
+ }
};
bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, HostMessagesRaw type);
@@ -292,31 +322,34 @@ enum class ExecutorMessagesRaw : uint8_t {
NONE = 0,
ExecResult = 1,
Executing = 2,
+ State = 3,
MIN = NONE,
- MAX = Executing
+ MAX = State
};
-inline const ExecutorMessagesRaw (&EnumValuesExecutorMessagesRaw())[3] {
+inline const ExecutorMessagesRaw (&EnumValuesExecutorMessagesRaw())[4] {
static const ExecutorMessagesRaw values[] = {
ExecutorMessagesRaw::NONE,
ExecutorMessagesRaw::ExecResult,
- ExecutorMessagesRaw::Executing
+ ExecutorMessagesRaw::Executing,
+ ExecutorMessagesRaw::State
};
return values;
}
inline const char * const *EnumNamesExecutorMessagesRaw() {
- static const char * const names[4] = {
+ static const char * const names[5] = {
"NONE",
"ExecResult",
"Executing",
+ "State",
nullptr
};
return names;
}
inline const char *EnumNameExecutorMessagesRaw(ExecutorMessagesRaw e) {
- if (flatbuffers::IsOutRange(e, ExecutorMessagesRaw::NONE, ExecutorMessagesRaw::Executing)) return "";
+ if (flatbuffers::IsOutRange(e, ExecutorMessagesRaw::NONE, ExecutorMessagesRaw::State)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesExecutorMessagesRaw()[index];
}
@@ -333,6 +366,10 @@ template<> struct ExecutorMessagesRawTraits<rpc::ExecutingMessageRaw> {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::Executing;
};
+template<> struct ExecutorMessagesRawTraits<rpc::StateResultRaw> {
+ static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::State;
+};
+
template<typename T> struct ExecutorMessagesRawUnionTraits {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::NONE;
};
@@ -345,6 +382,10 @@ template<> struct ExecutorMessagesRawUnionTraits<rpc::ExecutingMessageRawT> {
static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::Executing;
};
+template<> struct ExecutorMessagesRawUnionTraits<rpc::StateResultRawT> {
+ static const ExecutorMessagesRaw enum_value = ExecutorMessagesRaw::State;
+};
+
struct ExecutorMessagesRawUnion {
ExecutorMessagesRaw type;
void *value;
@@ -391,6 +432,14 @@ struct ExecutorMessagesRawUnion {
return type == ExecutorMessagesRaw::Executing ?
reinterpret_cast<const rpc::ExecutingMessageRawT *>(value) : nullptr;
}
+ rpc::StateResultRawT *AsState() {
+ return type == ExecutorMessagesRaw::State ?
+ reinterpret_cast<rpc::StateResultRawT *>(value) : nullptr;
+ }
+ const rpc::StateResultRawT *AsState() const {
+ return type == ExecutorMessagesRaw::State ?
+ reinterpret_cast<const rpc::StateResultRawT *>(value) : nullptr;
+ }
};
bool VerifyExecutorMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, ExecutorMessagesRaw type);
@@ -398,18 +447,16 @@ bool VerifyExecutorMessagesRawVector(flatbuffers::Verifier &verifier, const flat
enum class RequestFlag : uint64_t {
IsBinary = 1ULL,
- ResetState = 2ULL,
- ReturnOutput = 4ULL,
- ReturnError = 8ULL,
+ ReturnOutput = 2ULL,
+ ReturnError = 4ULL,
NONE = 0,
- ANY = 15ULL
+ ANY = 7ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(RequestFlag, uint64_t)
-inline const RequestFlag (&EnumValuesRequestFlag())[4] {
+inline const RequestFlag (&EnumValuesRequestFlag())[3] {
static const RequestFlag values[] = {
RequestFlag::IsBinary,
- RequestFlag::ResetState,
RequestFlag::ReturnOutput,
RequestFlag::ReturnError
};
@@ -417,14 +464,10 @@ inline const RequestFlag (&EnumValuesRequestFlag())[4] {
}
inline const char * const *EnumNamesRequestFlag() {
- static const char * const names[9] = {
+ static const char * const names[5] = {
"IsBinary",
- "ResetState",
- "",
"ReturnOutput",
"",
- "",
- "",
"ReturnError",
nullptr
};
@@ -440,29 +483,33 @@ inline const char *EnumNameRequestFlag(RequestFlag e) {
enum class ExecEnv : uint64_t {
Debug = 1ULL,
Signal = 2ULL,
- SandboxSetuid = 4ULL,
- SandboxNamespace = 8ULL,
- SandboxAndroid = 16ULL,
- ExtraCover = 32ULL,
- EnableTun = 64ULL,
- EnableNetDev = 128ULL,
- EnableNetReset = 256ULL,
- EnableCgroups = 512ULL,
- EnableCloseFds = 1024ULL,
- EnableDevlinkPCI = 2048ULL,
- EnableVhciInjection = 4096ULL,
- EnableWifi = 8192ULL,
- DelayKcovMmap = 16384ULL,
- EnableNicVF = 32768ULL,
+ ResetState = 4ULL,
+ SandboxNone = 8ULL,
+ SandboxSetuid = 16ULL,
+ SandboxNamespace = 32ULL,
+ SandboxAndroid = 64ULL,
+ ExtraCover = 128ULL,
+ EnableTun = 256ULL,
+ EnableNetDev = 512ULL,
+ EnableNetReset = 1024ULL,
+ EnableCgroups = 2048ULL,
+ EnableCloseFds = 4096ULL,
+ EnableDevlinkPCI = 8192ULL,
+ EnableVhciInjection = 16384ULL,
+ EnableWifi = 32768ULL,
+ DelayKcovMmap = 65536ULL,
+ EnableNicVF = 131072ULL,
NONE = 0,
- ANY = 65535ULL
+ ANY = 262143ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(ExecEnv, uint64_t)
-inline const ExecEnv (&EnumValuesExecEnv())[16] {
+inline const ExecEnv (&EnumValuesExecEnv())[18] {
static const ExecEnv values[] = {
ExecEnv::Debug,
ExecEnv::Signal,
+ ExecEnv::ResetState,
+ ExecEnv::SandboxNone,
ExecEnv::SandboxSetuid,
ExecEnv::SandboxNamespace,
ExecEnv::SandboxAndroid,
@@ -485,6 +532,8 @@ inline const char *EnumNameExecEnv(ExecEnv e) {
switch (e) {
case ExecEnv::Debug: return "Debug";
case ExecEnv::Signal: return "Signal";
+ case ExecEnv::ResetState: return "ResetState";
+ case ExecEnv::SandboxNone: return "SandboxNone";
case ExecEnv::SandboxSetuid: return "SandboxSetuid";
case ExecEnv::SandboxNamespace: return "SandboxNamespace";
case ExecEnv::SandboxAndroid: return "SandboxAndroid";
@@ -757,8 +806,11 @@ flatbuffers::Offset<ConnectRequestRaw> CreateConnectRequestRaw(flatbuffers::Flat
struct ConnectReplyRawT : public flatbuffers::NativeTable {
typedef ConnectReplyRaw TableType;
bool debug = false;
+ bool cover = false;
int32_t procs = 0;
int32_t slowdown = 0;
+ int32_t syscall_timeout_ms = 0;
+ int32_t program_timeout_ms = 0;
std::vector<std::string> leak_frames{};
std::vector<std::string> race_frames{};
rpc::Feature features = static_cast<rpc::Feature>(0);
@@ -771,23 +823,35 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ConnectReplyRawBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_DEBUG = 4,
- VT_PROCS = 6,
- VT_SLOWDOWN = 8,
- VT_LEAK_FRAMES = 10,
- VT_RACE_FRAMES = 12,
- VT_FEATURES = 14,
- VT_FILES = 16,
- VT_GLOBS = 18
+ VT_COVER = 6,
+ VT_PROCS = 8,
+ VT_SLOWDOWN = 10,
+ VT_SYSCALL_TIMEOUT_MS = 12,
+ VT_PROGRAM_TIMEOUT_MS = 14,
+ VT_LEAK_FRAMES = 16,
+ VT_RACE_FRAMES = 18,
+ VT_FEATURES = 20,
+ VT_FILES = 22,
+ VT_GLOBS = 24
};
bool debug() const {
return GetField<uint8_t>(VT_DEBUG, 0) != 0;
}
+ bool cover() const {
+ return GetField<uint8_t>(VT_COVER, 0) != 0;
+ }
int32_t procs() const {
return GetField<int32_t>(VT_PROCS, 0);
}
int32_t slowdown() const {
return GetField<int32_t>(VT_SLOWDOWN, 0);
}
+ int32_t syscall_timeout_ms() const {
+ return GetField<int32_t>(VT_SYSCALL_TIMEOUT_MS, 0);
+ }
+ int32_t program_timeout_ms() const {
+ return GetField<int32_t>(VT_PROGRAM_TIMEOUT_MS, 0);
+ }
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *leak_frames() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_LEAK_FRAMES);
}
@@ -806,8 +870,11 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_DEBUG, 1) &&
+ VerifyField<uint8_t>(verifier, VT_COVER, 1) &&
VerifyField<int32_t>(verifier, VT_PROCS, 4) &&
VerifyField<int32_t>(verifier, VT_SLOWDOWN, 4) &&
+ VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) &&
+ VerifyField<int32_t>(verifier, VT_PROGRAM_TIMEOUT_MS, 4) &&
VerifyOffset(verifier, VT_LEAK_FRAMES) &&
verifier.VerifyVector(leak_frames()) &&
verifier.VerifyVectorOfStrings(leak_frames()) &&
@@ -835,12 +902,21 @@ struct ConnectReplyRawBuilder {
void add_debug(bool debug) {
fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_DEBUG, static_cast<uint8_t>(debug), 0);
}
+ void add_cover(bool cover) {
+ fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER, static_cast<uint8_t>(cover), 0);
+ }
void add_procs(int32_t procs) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROCS, procs, 0);
}
void add_slowdown(int32_t slowdown) {
fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_SLOWDOWN, slowdown, 0);
}
+ void add_syscall_timeout_ms(int32_t syscall_timeout_ms) {
+ fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_SYSCALL_TIMEOUT_MS, syscall_timeout_ms, 0);
+ }
+ void add_program_timeout_ms(int32_t program_timeout_ms) {
+ fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROGRAM_TIMEOUT_MS, program_timeout_ms, 0);
+ }
void add_leak_frames(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> leak_frames) {
fbb_.AddOffset(ConnectReplyRaw::VT_LEAK_FRAMES, leak_frames);
}
@@ -870,8 +946,11 @@ struct ConnectReplyRawBuilder {
inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
+ bool cover = false,
int32_t procs = 0,
int32_t slowdown = 0,
+ int32_t syscall_timeout_ms = 0,
+ int32_t program_timeout_ms = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> leak_frames = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> race_frames = 0,
rpc::Feature features = static_cast<rpc::Feature>(0),
@@ -883,8 +962,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
builder_.add_files(files);
builder_.add_race_frames(race_frames);
builder_.add_leak_frames(leak_frames);
+ builder_.add_program_timeout_ms(program_timeout_ms);
+ builder_.add_syscall_timeout_ms(syscall_timeout_ms);
builder_.add_slowdown(slowdown);
builder_.add_procs(procs);
+ builder_.add_cover(cover);
builder_.add_debug(debug);
return builder_.Finish();
}
@@ -892,8 +974,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
bool debug = false,
+ bool cover = false,
int32_t procs = 0,
int32_t slowdown = 0,
+ int32_t syscall_timeout_ms = 0,
+ int32_t program_timeout_ms = 0,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *leak_frames = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *race_frames = nullptr,
rpc::Feature features = static_cast<rpc::Feature>(0),
@@ -906,8 +991,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
return rpc::CreateConnectReplyRaw(
_fbb,
debug,
+ cover,
procs,
slowdown,
+ syscall_timeout_ms,
+ program_timeout_ms,
leak_frames__,
race_frames__,
features,
@@ -1392,6 +1480,9 @@ struct HostMessageRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const rpc::StartLeakChecksRaw *msg_as_StartLeakChecks() const {
return msg_type() == rpc::HostMessagesRaw::StartLeakChecks ? static_cast<const rpc::StartLeakChecksRaw *>(msg()) : nullptr;
}
+ const rpc::StateRequestRaw *msg_as_StateRequest() const {
+ return msg_type() == rpc::HostMessagesRaw::StateRequest ? static_cast<const rpc::StateRequestRaw *>(msg()) : nullptr;
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_MSG_TYPE, 1) &&
@@ -1416,6 +1507,10 @@ template<> inline const rpc::StartLeakChecksRaw *HostMessageRaw::msg_as<rpc::Sta
return msg_as_StartLeakChecks();
}
+template<> inline const rpc::StateRequestRaw *HostMessageRaw::msg_as<rpc::StateRequestRaw>() const {
+ return msg_as_StateRequest();
+}
+
struct HostMessageRawBuilder {
typedef HostMessageRaw Table;
flatbuffers::FlatBufferBuilder &fbb_;
@@ -1474,6 +1569,9 @@ struct ExecutorMessageRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const rpc::ExecutingMessageRaw *msg_as_Executing() const {
return msg_type() == rpc::ExecutorMessagesRaw::Executing ? static_cast<const rpc::ExecutingMessageRaw *>(msg()) : nullptr;
}
+ const rpc::StateResultRaw *msg_as_State() const {
+ return msg_type() == rpc::ExecutorMessagesRaw::State ? static_cast<const rpc::StateResultRaw *>(msg()) : nullptr;
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_MSG_TYPE, 1) &&
@@ -1494,6 +1592,10 @@ template<> inline const rpc::ExecutingMessageRaw *ExecutorMessageRaw::msg_as<rpc
return msg_as_Executing();
}
+template<> inline const rpc::StateResultRaw *ExecutorMessageRaw::msg_as<rpc::StateResultRaw>() const {
+ return msg_as_State();
+}
+
struct ExecutorMessageRawBuilder {
typedef ExecutorMessageRaw Table;
flatbuffers::FlatBufferBuilder &fbb_;
@@ -1533,10 +1635,7 @@ struct ExecRequestRawT : public flatbuffers::NativeTable {
std::vector<uint8_t> prog_data{};
std::unique_ptr<rpc::ExecOptsRaw> exec_opts{};
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0);
- std::vector<uint64_t> signal_filter{};
- int32_t signal_filter_call = 0;
std::vector<int32_t> all_signal{};
- int32_t repeat = 0;
ExecRequestRawT() = default;
ExecRequestRawT(const ExecRequestRawT &o);
ExecRequestRawT(ExecRequestRawT&&) FLATBUFFERS_NOEXCEPT = default;
@@ -1551,10 +1650,7 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_PROG_DATA = 6,
VT_EXEC_OPTS = 8,
VT_FLAGS = 10,
- VT_SIGNAL_FILTER = 12,
- VT_SIGNAL_FILTER_CALL = 14,
- VT_ALL_SIGNAL = 16,
- VT_REPEAT = 18
+ VT_ALL_SIGNAL = 12
};
int64_t id() const {
return GetField<int64_t>(VT_ID, 0);
@@ -1568,18 +1664,9 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
rpc::RequestFlag flags() const {
return static_cast<rpc::RequestFlag>(GetField<uint64_t>(VT_FLAGS, 0));
}
- const flatbuffers::Vector<uint64_t> *signal_filter() const {
- return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_SIGNAL_FILTER);
- }
- int32_t signal_filter_call() const {
- return GetField<int32_t>(VT_SIGNAL_FILTER_CALL, 0);
- }
const flatbuffers::Vector<int32_t> *all_signal() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_ALL_SIGNAL);
}
- int32_t repeat() const {
- return GetField<int32_t>(VT_REPEAT, 0);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int64_t>(verifier, VT_ID, 8) &&
@@ -1587,12 +1674,8 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(prog_data()) &&
VerifyField<rpc::ExecOptsRaw>(verifier, VT_EXEC_OPTS, 8) &&
VerifyField<uint64_t>(verifier, VT_FLAGS, 8) &&
- VerifyOffset(verifier, VT_SIGNAL_FILTER) &&
- verifier.VerifyVector(signal_filter()) &&
- VerifyField<int32_t>(verifier, VT_SIGNAL_FILTER_CALL, 4) &&
VerifyOffset(verifier, VT_ALL_SIGNAL) &&
verifier.VerifyVector(all_signal()) &&
- VerifyField<int32_t>(verifier, VT_REPEAT, 4) &&
verifier.EndTable();
}
ExecRequestRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -1616,18 +1699,9 @@ struct ExecRequestRawBuilder {
void add_flags(rpc::RequestFlag flags) {
fbb_.AddElement<uint64_t>(ExecRequestRaw::VT_FLAGS, static_cast<uint64_t>(flags), 0);
}
- void add_signal_filter(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter) {
- fbb_.AddOffset(ExecRequestRaw::VT_SIGNAL_FILTER, signal_filter);
- }
- void add_signal_filter_call(int32_t signal_filter_call) {
- fbb_.AddElement<int32_t>(ExecRequestRaw::VT_SIGNAL_FILTER_CALL, signal_filter_call, 0);
- }
void add_all_signal(flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal) {
fbb_.AddOffset(ExecRequestRaw::VT_ALL_SIGNAL, all_signal);
}
- void add_repeat(int32_t repeat) {
- fbb_.AddElement<int32_t>(ExecRequestRaw::VT_REPEAT, repeat, 0);
- }
explicit ExecRequestRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1645,17 +1719,11 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data = 0,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
- flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter = 0,
- int32_t signal_filter_call = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal = 0,
- int32_t repeat = 0) {
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal = 0) {
ExecRequestRawBuilder builder_(_fbb);
builder_.add_flags(flags);
builder_.add_id(id);
- builder_.add_repeat(repeat);
builder_.add_all_signal(all_signal);
- builder_.add_signal_filter_call(signal_filter_call);
- builder_.add_signal_filter(signal_filter);
builder_.add_exec_opts(exec_opts);
builder_.add_prog_data(prog_data);
return builder_.Finish();
@@ -1667,12 +1735,8 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect(
const std::vector<uint8_t> *prog_data = nullptr,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
- const std::vector<uint64_t> *signal_filter = nullptr,
- int32_t signal_filter_call = 0,
- const std::vector<int32_t> *all_signal = nullptr,
- int32_t repeat = 0) {
+ const std::vector<int32_t> *all_signal = nullptr) {
auto prog_data__ = prog_data ? _fbb.CreateVector<uint8_t>(*prog_data) : 0;
- auto signal_filter__ = signal_filter ? _fbb.CreateVector<uint64_t>(*signal_filter) : 0;
auto all_signal__ = all_signal ? _fbb.CreateVector<int32_t>(*all_signal) : 0;
return rpc::CreateExecRequestRaw(
_fbb,
@@ -1680,10 +1744,7 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect(
prog_data__,
exec_opts,
flags,
- signal_filter__,
- signal_filter_call,
- all_signal__,
- repeat);
+ all_signal__);
}
flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const ExecRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -1804,6 +1865,45 @@ inline flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(
flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(flatbuffers::FlatBufferBuilder &_fbb, const StartLeakChecksRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct StateRequestRawT : public flatbuffers::NativeTable {
+ typedef StateRequestRaw TableType;
+};
+
+struct StateRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef StateRequestRawT NativeTableType;
+ typedef StateRequestRawBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ verifier.EndTable();
+ }
+ StateRequestRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(StateRequestRawT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<StateRequestRaw> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StateRequestRawBuilder {
+ typedef StateRequestRaw Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit StateRequestRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<StateRequestRaw> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<StateRequestRaw>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(
+ flatbuffers::FlatBufferBuilder &_fbb) {
+ StateRequestRawBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct ExecutingMessageRawT : public flatbuffers::NativeTable {
typedef ExecutingMessageRaw TableType;
int64_t id = 0;
@@ -2010,6 +2110,7 @@ flatbuffers::Offset<CallInfoRaw> CreateCallInfoRaw(flatbuffers::FlatBufferBuilde
struct ProgInfoRawT : public flatbuffers::NativeTable {
typedef ProgInfoRaw TableType;
std::vector<std::unique_ptr<rpc::CallInfoRawT>> calls{};
+ std::vector<std::unique_ptr<rpc::CallInfoRawT>> extra_raw{};
std::unique_ptr<rpc::CallInfoRawT> extra{};
uint64_t elapsed = 0;
uint64_t freshness = 0;
@@ -2024,13 +2125,17 @@ struct ProgInfoRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ProgInfoRawBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_CALLS = 4,
- VT_EXTRA = 6,
- VT_ELAPSED = 8,
- VT_FRESHNESS = 10
+ VT_EXTRA_RAW = 6,
+ VT_EXTRA = 8,
+ VT_ELAPSED = 10,
+ VT_FRESHNESS = 12
};
const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *calls() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *>(VT_CALLS);
}
+ const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *extra_raw() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>> *>(VT_EXTRA_RAW);
+ }
const rpc::CallInfoRaw *extra() const {
return GetPointer<const rpc::CallInfoRaw *>(VT_EXTRA);
}
@@ -2045,6 +2150,9 @@ struct ProgInfoRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_CALLS) &&
verifier.VerifyVector(calls()) &&
verifier.VerifyVectorOfTables(calls()) &&
+ VerifyOffset(verifier, VT_EXTRA_RAW) &&
+ verifier.VerifyVector(extra_raw()) &&
+ verifier.VerifyVectorOfTables(extra_raw()) &&
VerifyOffset(verifier, VT_EXTRA) &&
verifier.VerifyTable(extra()) &&
VerifyField<uint64_t>(verifier, VT_ELAPSED, 8) &&
@@ -2063,6 +2171,9 @@ struct ProgInfoRawBuilder {
void add_calls(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> calls) {
fbb_.AddOffset(ProgInfoRaw::VT_CALLS, calls);
}
+ void add_extra_raw(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> extra_raw) {
+ fbb_.AddOffset(ProgInfoRaw::VT_EXTRA_RAW, extra_raw);
+ }
void add_extra(flatbuffers::Offset<rpc::CallInfoRaw> extra) {
fbb_.AddOffset(ProgInfoRaw::VT_EXTRA, extra);
}
@@ -2086,6 +2197,7 @@ struct ProgInfoRawBuilder {
inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> calls = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::CallInfoRaw>>> extra_raw = 0,
flatbuffers::Offset<rpc::CallInfoRaw> extra = 0,
uint64_t elapsed = 0,
uint64_t freshness = 0) {
@@ -2093,6 +2205,7 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
builder_.add_freshness(freshness);
builder_.add_elapsed(elapsed);
builder_.add_extra(extra);
+ builder_.add_extra_raw(extra_raw);
builder_.add_calls(calls);
return builder_.Finish();
}
@@ -2100,13 +2213,16 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(
inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> *calls = nullptr,
+ const std::vector<flatbuffers::Offset<rpc::CallInfoRaw>> *extra_raw = nullptr,
flatbuffers::Offset<rpc::CallInfoRaw> extra = 0,
uint64_t elapsed = 0,
uint64_t freshness = 0) {
auto calls__ = calls ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>>(*calls) : 0;
+ auto extra_raw__ = extra_raw ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>>(*extra_raw) : 0;
return rpc::CreateProgInfoRaw(
_fbb,
calls__,
+ extra_raw__,
extra,
elapsed,
freshness);
@@ -2222,6 +2338,68 @@ inline flatbuffers::Offset<ExecResultRaw> CreateExecResultRawDirect(
flatbuffers::Offset<ExecResultRaw> CreateExecResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const ExecResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct StateResultRawT : public flatbuffers::NativeTable {
+ typedef StateResultRaw TableType;
+ std::vector<uint8_t> data{};
+};
+
+struct StateResultRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef StateResultRawT NativeTableType;
+ typedef StateResultRawBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_DATA = 4
+ };
+ const flatbuffers::Vector<uint8_t> *data() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_DATA) &&
+ verifier.VerifyVector(data()) &&
+ verifier.EndTable();
+ }
+ StateResultRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(StateResultRawT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<StateResultRaw> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StateResultRawBuilder {
+ typedef StateResultRaw Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+ fbb_.AddOffset(StateResultRaw::VT_DATA, data);
+ }
+ explicit StateResultRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<StateResultRaw> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<StateResultRaw>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0) {
+ StateResultRawBuilder builder_(_fbb);
+ builder_.add_data(data);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRawDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *data = nullptr) {
+ auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
+ return rpc::CreateStateResultRaw(
+ _fbb,
+ data__);
+}
+
+flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
inline ConnectRequestRawT *ConnectRequestRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = std::unique_ptr<ConnectRequestRawT>(new ConnectRequestRawT());
UnPackTo(_o.get(), _resolver);
@@ -2267,8 +2445,11 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r
(void)_o;
(void)_resolver;
{ auto _e = debug(); _o->debug = _e; }
+ { auto _e = cover(); _o->cover = _e; }
{ auto _e = procs(); _o->procs = _e; }
{ auto _e = slowdown(); _o->slowdown = _e; }
+ { auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; }
+ { auto _e = program_timeout_ms(); _o->program_timeout_ms = _e; }
{ auto _e = leak_frames(); if (_e) { _o->leak_frames.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->leak_frames[_i] = _e->Get(_i)->str(); } } }
{ auto _e = race_frames(); if (_e) { _o->race_frames.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->race_frames[_i] = _e->Get(_i)->str(); } } }
{ auto _e = features(); _o->features = _e; }
@@ -2285,8 +2466,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _debug = _o->debug;
+ auto _cover = _o->cover;
auto _procs = _o->procs;
auto _slowdown = _o->slowdown;
+ auto _syscall_timeout_ms = _o->syscall_timeout_ms;
+ auto _program_timeout_ms = _o->program_timeout_ms;
auto _leak_frames = _o->leak_frames.size() ? _fbb.CreateVectorOfStrings(_o->leak_frames) : 0;
auto _race_frames = _o->race_frames.size() ? _fbb.CreateVectorOfStrings(_o->race_frames) : 0;
auto _features = _o->features;
@@ -2295,8 +2479,11 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
return rpc::CreateConnectReplyRaw(
_fbb,
_debug,
+ _cover,
_procs,
_slowdown,
+ _syscall_timeout_ms,
+ _program_timeout_ms,
_leak_frames,
_race_frames,
_features,
@@ -2542,10 +2729,7 @@ inline ExecRequestRawT::ExecRequestRawT(const ExecRequestRawT &o)
prog_data(o.prog_data),
exec_opts((o.exec_opts) ? new rpc::ExecOptsRaw(*o.exec_opts) : nullptr),
flags(o.flags),
- signal_filter(o.signal_filter),
- signal_filter_call(o.signal_filter_call),
- all_signal(o.all_signal),
- repeat(o.repeat) {
+ all_signal(o.all_signal) {
}
inline ExecRequestRawT &ExecRequestRawT::operator=(ExecRequestRawT o) FLATBUFFERS_NOEXCEPT {
@@ -2553,10 +2737,7 @@ inline ExecRequestRawT &ExecRequestRawT::operator=(ExecRequestRawT o) FLATBUFFER
std::swap(prog_data, o.prog_data);
std::swap(exec_opts, o.exec_opts);
std::swap(flags, o.flags);
- std::swap(signal_filter, o.signal_filter);
- std::swap(signal_filter_call, o.signal_filter_call);
std::swap(all_signal, o.all_signal);
- std::swap(repeat, o.repeat);
return *this;
}
@@ -2573,10 +2754,7 @@ inline void ExecRequestRaw::UnPackTo(ExecRequestRawT *_o, const flatbuffers::res
{ auto _e = prog_data(); if (_e) { _o->prog_data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->prog_data.begin()); } }
{ auto _e = exec_opts(); if (_e) _o->exec_opts = std::unique_ptr<rpc::ExecOptsRaw>(new rpc::ExecOptsRaw(*_e)); }
{ auto _e = flags(); _o->flags = _e; }
- { auto _e = signal_filter(); if (_e) { _o->signal_filter.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->signal_filter[_i] = _e->Get(_i); } } }
- { auto _e = signal_filter_call(); _o->signal_filter_call = _e; }
{ auto _e = all_signal(); if (_e) { _o->all_signal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->all_signal[_i] = _e->Get(_i); } } }
- { auto _e = repeat(); _o->repeat = _e; }
}
inline flatbuffers::Offset<ExecRequestRaw> ExecRequestRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExecRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -2591,20 +2769,14 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::Fla
auto _prog_data = _o->prog_data.size() ? _fbb.CreateVector(_o->prog_data) : 0;
auto _exec_opts = _o->exec_opts ? _o->exec_opts.get() : nullptr;
auto _flags = _o->flags;
- auto _signal_filter = _o->signal_filter.size() ? _fbb.CreateVector(_o->signal_filter) : 0;
- auto _signal_filter_call = _o->signal_filter_call;
auto _all_signal = _o->all_signal.size() ? _fbb.CreateVector(_o->all_signal) : 0;
- auto _repeat = _o->repeat;
return rpc::CreateExecRequestRaw(
_fbb,
_id,
_prog_data,
_exec_opts,
_flags,
- _signal_filter,
- _signal_filter_call,
- _all_signal,
- _repeat);
+ _all_signal);
}
inline SignalUpdateRawT *SignalUpdateRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
@@ -2659,6 +2831,29 @@ inline flatbuffers::Offset<StartLeakChecksRaw> CreateStartLeakChecksRaw(flatbuff
_fbb);
}
+inline StateRequestRawT *StateRequestRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<StateRequestRawT>(new StateRequestRawT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void StateRequestRaw::UnPackTo(StateRequestRawT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<StateRequestRaw> StateRequestRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateStateRequestRaw(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<StateRequestRaw> CreateStateRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StateRequestRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ return rpc::CreateStateRequestRaw(
+ _fbb);
+}
+
inline ExecutingMessageRawT *ExecutingMessageRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = std::unique_ptr<ExecutingMessageRawT>(new ExecutingMessageRawT());
UnPackTo(_o.get(), _resolver);
@@ -2738,10 +2933,13 @@ inline ProgInfoRawT::ProgInfoRawT(const ProgInfoRawT &o)
freshness(o.freshness) {
calls.reserve(o.calls.size());
for (const auto &calls_ : o.calls) { calls.emplace_back((calls_) ? new rpc::CallInfoRawT(*calls_) : nullptr); }
+ extra_raw.reserve(o.extra_raw.size());
+ for (const auto &extra_raw_ : o.extra_raw) { extra_raw.emplace_back((extra_raw_) ? new rpc::CallInfoRawT(*extra_raw_) : nullptr); }
}
inline ProgInfoRawT &ProgInfoRawT::operator=(ProgInfoRawT o) FLATBUFFERS_NOEXCEPT {
std::swap(calls, o.calls);
+ std::swap(extra_raw, o.extra_raw);
std::swap(extra, o.extra);
std::swap(elapsed, o.elapsed);
std::swap(freshness, o.freshness);
@@ -2758,6 +2956,7 @@ inline void ProgInfoRaw::UnPackTo(ProgInfoRawT *_o, const flatbuffers::resolver_
(void)_o;
(void)_resolver;
{ auto _e = calls(); if (_e) { _o->calls.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->calls[_i] = std::unique_ptr<rpc::CallInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
+ { auto _e = extra_raw(); if (_e) { _o->extra_raw.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->extra_raw[_i] = std::unique_ptr<rpc::CallInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = extra(); if (_e) _o->extra = std::unique_ptr<rpc::CallInfoRawT>(_e->UnPack(_resolver)); }
{ auto _e = elapsed(); _o->elapsed = _e; }
{ auto _e = freshness(); _o->freshness = _e; }
@@ -2772,12 +2971,14 @@ inline flatbuffers::Offset<ProgInfoRaw> CreateProgInfoRaw(flatbuffers::FlatBuffe
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ProgInfoRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _calls = _o->calls.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>> (_o->calls.size(), [](size_t i, _VectorArgs *__va) { return CreateCallInfoRaw(*__va->__fbb, __va->__o->calls[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _extra_raw = _o->extra_raw.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::CallInfoRaw>> (_o->extra_raw.size(), [](size_t i, _VectorArgs *__va) { return CreateCallInfoRaw(*__va->__fbb, __va->__o->extra_raw[i].get(), __va->__rehasher); }, &_va ) : 0;
auto _extra = _o->extra ? CreateCallInfoRaw(_fbb, _o->extra.get(), _rehasher) : 0;
auto _elapsed = _o->elapsed;
auto _freshness = _o->freshness;
return rpc::CreateProgInfoRaw(
_fbb,
_calls,
+ _extra_raw,
_extra,
_elapsed,
_freshness);
@@ -2833,6 +3034,32 @@ inline flatbuffers::Offset<ExecResultRaw> CreateExecResultRaw(flatbuffers::FlatB
_info);
}
+inline StateResultRawT *StateResultRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = std::unique_ptr<StateResultRawT>(new StateResultRawT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void StateResultRaw::UnPackTo(StateResultRawT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
+}
+
+inline flatbuffers::Offset<StateResultRaw> StateResultRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateStateResultRaw(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<StateResultRaw> CreateStateResultRaw(flatbuffers::FlatBufferBuilder &_fbb, const StateResultRawT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StateResultRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
+ return rpc::CreateStateResultRaw(
+ _fbb,
+ _data);
+}
+
inline bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, HostMessagesRaw type) {
switch (type) {
case HostMessagesRaw::NONE: {
@@ -2850,6 +3077,10 @@ inline bool VerifyHostMessagesRaw(flatbuffers::Verifier &verifier, const void *o
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRaw *>(obj);
return verifier.VerifyTable(ptr);
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRaw *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return true;
}
}
@@ -2881,6 +3112,10 @@ inline void *HostMessagesRawUnion::UnPack(const void *obj, HostMessagesRaw type,
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRaw *>(obj);
return ptr->UnPack(resolver);
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRaw *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -2900,6 +3135,10 @@ inline flatbuffers::Offset<void> HostMessagesRawUnion::Pack(flatbuffers::FlatBuf
auto ptr = reinterpret_cast<const rpc::StartLeakChecksRawT *>(value);
return CreateStartLeakChecksRaw(_fbb, ptr, _rehasher).Union();
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<const rpc::StateRequestRawT *>(value);
+ return CreateStateRequestRaw(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -2918,6 +3157,10 @@ inline HostMessagesRawUnion::HostMessagesRawUnion(const HostMessagesRawUnion &u)
value = new rpc::StartLeakChecksRawT(*reinterpret_cast<rpc::StartLeakChecksRawT *>(u.value));
break;
}
+ case HostMessagesRaw::StateRequest: {
+ value = new rpc::StateRequestRawT(*reinterpret_cast<rpc::StateRequestRawT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -2940,6 +3183,11 @@ inline void HostMessagesRawUnion::Reset() {
delete ptr;
break;
}
+ case HostMessagesRaw::StateRequest: {
+ auto ptr = reinterpret_cast<rpc::StateRequestRawT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
@@ -2959,6 +3207,10 @@ inline bool VerifyExecutorMessagesRaw(flatbuffers::Verifier &verifier, const voi
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRaw *>(obj);
return verifier.VerifyTable(ptr);
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRaw *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return true;
}
}
@@ -2986,6 +3238,10 @@ inline void *ExecutorMessagesRawUnion::UnPack(const void *obj, ExecutorMessagesR
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRaw *>(obj);
return ptr->UnPack(resolver);
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRaw *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -3001,6 +3257,10 @@ inline flatbuffers::Offset<void> ExecutorMessagesRawUnion::Pack(flatbuffers::Fla
auto ptr = reinterpret_cast<const rpc::ExecutingMessageRawT *>(value);
return CreateExecutingMessageRaw(_fbb, ptr, _rehasher).Union();
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<const rpc::StateResultRawT *>(value);
+ return CreateStateResultRaw(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -3015,6 +3275,10 @@ inline ExecutorMessagesRawUnion::ExecutorMessagesRawUnion(const ExecutorMessages
value = new rpc::ExecutingMessageRawT(*reinterpret_cast<rpc::ExecutingMessageRawT *>(u.value));
break;
}
+ case ExecutorMessagesRaw::State: {
+ value = new rpc::StateResultRawT(*reinterpret_cast<rpc::StateResultRawT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -3032,6 +3296,11 @@ inline void ExecutorMessagesRawUnion::Reset() {
delete ptr;
break;
}
+ case ExecutorMessagesRaw::State: {
+ auto ptr = reinterpret_cast<rpc::StateResultRawT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
diff --git a/pkg/flatrpc/helpers.go b/pkg/flatrpc/helpers.go
index 22dc893fe..b85951615 100644
--- a/pkg/flatrpc/helpers.go
+++ b/pkg/flatrpc/helpers.go
@@ -4,6 +4,7 @@
package flatrpc
import (
+ "fmt"
"slices"
"syscall"
)
@@ -25,6 +26,7 @@ type HostMessage = HostMessageRawT
type ExecutorMessages = ExecutorMessagesRawT
type ExecutorMessage = ExecutorMessageRawT
type ExecRequest = ExecRequestRawT
+type StateRequest = StateRequestRawT
type SignalUpdate = SignalUpdateRawT
type StartLeakChecks = StartLeakChecksRawT
type ExecutingMessage = ExecutingMessageRawT
@@ -33,6 +35,7 @@ type Comparison = ComparisonRawT
type ExecOpts = ExecOptsRawT
type ProgInfo = ProgInfoRawT
type ExecResult = ExecResultRawT
+type StateResult = StateResultRawT
func (pi *ProgInfo) Clone() *ProgInfo {
if pi == nil {
@@ -71,9 +74,30 @@ func EmptyProgInfo(calls int) *ProgInfo {
return info
}
-func (eo ExecOpts) MergeFlags(diff ExecOpts) ExecOpts {
- ret := eo
- ret.ExecFlags |= diff.ExecFlags
- ret.EnvFlags |= diff.EnvFlags
- return ret
+func SandboxToFlags(sandbox string) (ExecEnv, error) {
+ switch sandbox {
+ case "none":
+ return ExecEnvSandboxNone, nil
+ case "setuid":
+ return ExecEnvSandboxSetuid, nil
+ case "namespace":
+ return ExecEnvSandboxNamespace, nil
+ case "android":
+ return ExecEnvSandboxAndroid, nil
+ default:
+ return 0, fmt.Errorf("sandbox must contain one of none/setuid/namespace/android")
+ }
+}
+
+func FlagsToSandbox(flags ExecEnv) string {
+ if flags&ExecEnvSandboxNone != 0 {
+ return "none"
+ } else if flags&ExecEnvSandboxSetuid != 0 {
+ return "setuid"
+ } else if flags&ExecEnvSandboxNamespace != 0 {
+ return "namespace"
+ } else if flags&ExecEnvSandboxAndroid != 0 {
+ return "android"
+ }
+ panic("no sandbox flags present")
}
diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go
index 92d8b0f8d..5b95f4eec 100644
--- a/pkg/fuzzer/fuzzer.go
+++ b/pkg/fuzzer/fuzzer.go
@@ -166,7 +166,6 @@ func (fuzzer *Fuzzer) processResult(req *queue.Request, res *queue.Result, flags
type Config struct {
Debug bool
Corpus *corpus.Corpus
- BaseOpts flatrpc.ExecOpts // Fuzzer will use BaseOpts as a base for all requests.
Logf func(level int, msg string, args ...interface{})
Coverage bool
FaultInjection bool
@@ -251,7 +250,6 @@ func (fuzzer *Fuzzer) Next() *queue.Request {
// The fuzzer is not supposed to issue nil requests.
panic("nil request from the fuzzer")
}
- req.ExecOpts = fuzzer.Config.BaseOpts.MergeFlags(req.ExecOpts)
return req
}
diff --git a/pkg/fuzzer/fuzzer_test.go b/pkg/fuzzer/fuzzer_test.go
index 206469fda..55ec09666 100644
--- a/pkg/fuzzer/fuzzer_test.go
+++ b/pkg/fuzzer/fuzzer_test.go
@@ -13,6 +13,7 @@ import (
"runtime"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -20,14 +21,13 @@ import (
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
"github.com/stretchr/testify/assert"
- "golang.org/x/sync/errgroup"
)
func TestFuzz(t *testing.T) {
@@ -42,15 +42,14 @@ func TestFuzz(t *testing.T) {
t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
}
executor := csource.BuildExecutor(t, target, "../..", "-fsanitize-coverage=trace-pc", "-g")
+
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _, opts, _ := ipcconfig.Default(target)
corpusUpdates := make(chan corpus.NewItemEvent)
fuzzer := NewFuzzer(ctx, &Config{
- Debug: true,
- BaseOpts: *opts,
- Corpus: corpus.NewMonitoredCorpus(ctx, corpusUpdates),
+ Debug: true,
+ Corpus: corpus.NewMonitoredCorpus(ctx, corpusUpdates),
Logf: func(level int, msg string, args ...interface{}) {
if level > 1 {
return
@@ -74,24 +73,24 @@ func TestFuzz(t *testing.T) {
}
}()
- tf := newTestFuzzer(t, fuzzer, map[string]bool{
- "first bug": true,
- "second bug": true,
- }, 10000)
-
- for i := 0; i < 2; i++ {
- tf.registerExecutor(newProc(t, target, executor))
+ tf := &testFuzzer{
+ t: t,
+ target: target,
+ fuzzer: fuzzer,
+ executor: executor,
+ iterLimit: 10000,
+ expectedCrashes: map[string]bool{
+ "first bug": true,
+ "second bug": true,
+ },
}
- tf.wait()
+ tf.run()
t.Logf("resulting corpus:")
for _, p := range fuzzer.Config.Corpus.Programs() {
t.Logf("-----")
t.Logf("%s", p.Serialize())
}
-
- assert.Equal(t, len(tf.expectedCrashes), len(tf.crashes),
- "not all expected crashes were found")
}
func BenchmarkFuzzer(b *testing.B) {
@@ -204,114 +203,87 @@ func emulateExec(req *queue.Request) (*queue.Result, string, error) {
type testFuzzer struct {
t testing.TB
- eg errgroup.Group
+ target *prog.Target
fuzzer *Fuzzer
+ executor string
mu sync.Mutex
crashes map[string]int
expectedCrashes map[string]bool
iter int
iterLimit int
+ done func()
+ finished atomic.Bool
+}
+
+func (f *testFuzzer) run() {
+ f.crashes = make(map[string]int)
+ ctx, done := context.WithCancel(context.Background())
+ f.done = done
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: f.target,
+ Features: flatrpc.FeatureSandboxNone,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ },
+ Procs: 4,
+ Slowdown: 1,
+ },
+ Executor: f.executor,
+ Dir: f.t.TempDir(),
+ Context: ctx,
+ }
+ cfg.MachineChecked = func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ cfg.Cover = true
+ return f
+ }
+ if err := rpcserver.RunLocal(cfg); err != nil {
+ f.t.Fatal(err)
+ }
+ assert.Equal(f.t, len(f.expectedCrashes), len(f.crashes), "not all expected crashes were found")
}
-func newTestFuzzer(t testing.TB, fuzzer *Fuzzer, expectedCrashes map[string]bool, iterLimit int) *testFuzzer {
- return &testFuzzer{
- t: t,
- fuzzer: fuzzer,
- expectedCrashes: expectedCrashes,
- crashes: map[string]int{},
- iterLimit: iterLimit,
+func (f *testFuzzer) Next() *queue.Request {
+ if f.finished.Load() {
+ return nil
}
+ req := f.fuzzer.Next()
+ req.ExecOpts.EnvFlags |= flatrpc.ExecEnvSignal | flatrpc.ExecEnvSandboxNone
+ req.ReturnOutput = true
+ req.ReturnError = true
+ req.OnDone(f.OnDone)
+ return req
}
-func (f *testFuzzer) oneMore() bool {
+func (f *testFuzzer) OnDone(req *queue.Request, res *queue.Result) bool {
+ // TODO: support hints emulation.
+ match := crashRe.FindSubmatch(res.Output)
f.mu.Lock()
defer f.mu.Unlock()
+ if match != nil {
+ crash := string(match[1])
+ f.t.Logf("CRASH: %s", crash)
+ res.Status = queue.Crashed
+ if !f.expectedCrashes[crash] {
+ f.t.Errorf("unexpected crash: %q", crash)
+ }
+ f.crashes[crash]++
+ }
f.iter++
if f.iter%100 == 0 {
f.t.Logf("<iter %d>: corpus %d, signal %d, max signal %d, crash types %d, running jobs %d",
f.iter, f.fuzzer.Config.Corpus.StatProgs.Val(), f.fuzzer.Config.Corpus.StatSignal.Val(),
len(f.fuzzer.Cover.maxSignal), len(f.crashes), f.fuzzer.statJobs.Val())
}
- return f.iter < f.iterLimit &&
- (f.expectedCrashes == nil || len(f.crashes) != len(f.expectedCrashes))
-}
-
-func (f *testFuzzer) registerExecutor(proc *executorProc) {
- f.eg.Go(func() error {
- for f.oneMore() {
- req := f.fuzzer.Next()
- res, crash, err := proc.execute(req)
- if err != nil {
- return err
- }
- if crash != "" {
- res = &queue.Result{Status: queue.Crashed}
- if !f.expectedCrashes[crash] {
- return fmt.Errorf("unexpected crash: %q", crash)
- }
- f.mu.Lock()
- f.t.Logf("CRASH: %s", crash)
- f.crashes[crash]++
- f.mu.Unlock()
- }
- req.Done(res)
- }
- return nil
- })
-}
-
-func (f *testFuzzer) wait() {
- t := f.t
- err := f.eg.Wait()
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("crashes:")
- for title, cnt := range f.crashes {
- t.Logf("%s: %d", title, cnt)
- }
-}
-
-// TODO: it's already implemented in syz-fuzzer/proc.go,
-// pkg/runtest and tools/syz-execprog.
-// Looks like it's time to factor out this functionality.
-type executorProc struct {
- env *ipc.Env
- execOpts flatrpc.ExecOpts
-}
-
-func newProc(t *testing.T, target *prog.Target, executor string) *executorProc {
- config, execOpts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- config.Executor = executor
- execOpts.EnvFlags |= flatrpc.ExecEnvSignal
- env, err := ipc.MakeEnv(config, 0)
- if err != nil {
- t.Fatal(err)
- }
- t.Cleanup(func() { env.Close() })
- return &executorProc{
- env: env,
- execOpts: *execOpts,
+ if !f.finished.Load() && (f.iter > f.iterLimit || len(f.crashes) == len(f.expectedCrashes)) {
+ f.done()
+ f.finished.Store(true)
}
+ return true
}
var crashRe = regexp.MustCompile(`{{CRASH: (.*?)}}`)
-func (proc *executorProc) execute(req *queue.Request) (*queue.Result, string, error) {
- // TODO: support hints emulation.
- output, info, _, err := proc.env.Exec(&req.ExecOpts, req.Prog)
- ret := crashRe.FindStringSubmatch(string(output))
- if ret != nil {
- return nil, ret[1], nil
- } else if err != nil {
- return nil, "", err
- }
- return &queue.Result{Info: info}, "", nil
-}
-
func checkGoroutineLeaks() {
// Inspired by src/net/http/main_test.go.
buf := make([]byte, 2<<20)
diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go
index 0f6e0309c..0268172a9 100644
--- a/pkg/fuzzer/job.go
+++ b/pkg/fuzzer/job.go
@@ -121,6 +121,7 @@ func (job *triageJob) handleCall(call int, info *triageCall) {
}
if job.flags&ProgSmashed == 0 {
job.fuzzer.startJob(job.fuzzer.statJobsSmash, &smashJob{
+ exec: job.fuzzer.smashQueue,
p: p.Clone(),
call: call,
})
@@ -240,11 +241,10 @@ func (job *triageJob) minimize(call int, info *triageCall) (*prog.Prog, int) {
}
for i := 0; i < minimizeAttempts; i++ {
result := job.execute(&queue.Request{
- Prog: p1,
- ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
- SignalFilter: info.newStableSignal,
- SignalFilterCall: call1,
- Stat: job.fuzzer.statExecMinimize,
+ Prog: p1,
+ ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
+ ReturnAllSignal: []int{call1},
+ Stat: job.fuzzer.statExecMinimize,
}, 0)
if result.Stop() {
stop = true
@@ -294,6 +294,7 @@ func getSignalAndCover(p *prog.Prog, info *flatrpc.ProgInfo, call int) signal.Si
}
type smashJob struct {
+ exec queue.Executor
p *prog.Prog
call int
}
@@ -302,6 +303,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
fuzzer.Logf(2, "smashing the program %s (call=%d):", job.p, job.call)
if fuzzer.Config.Comparisons && job.call >= 0 {
fuzzer.startJob(fuzzer.statJobsHints, &hintsJob{
+ exec: fuzzer.smashQueue,
p: job.p.Clone(),
call: job.call,
})
@@ -315,7 +317,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
fuzzer.ChoiceTable(),
fuzzer.Config.NoMutateCalls,
fuzzer.Config.Corpus.Programs())
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
Stat: fuzzer.statExecSmash,
@@ -324,7 +326,7 @@ func (job *smashJob) run(fuzzer *Fuzzer) {
return
}
if fuzzer.Config.Collide {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: randomCollide(p, rnd),
Stat: fuzzer.statExecCollide,
})
@@ -366,7 +368,7 @@ func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
job.call, nth)
newProg := job.p.Clone()
newProg.Calls[job.call].Props.FailNth = nth
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: newProg,
Stat: fuzzer.statExecFaultInject,
})
@@ -382,6 +384,7 @@ func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
}
type hintsJob struct {
+ exec queue.Executor
p *prog.Prog
call int
}
@@ -393,7 +396,7 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
var comps prog.CompMap
for i := 0; i < 2; i++ {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectComps),
Stat: fuzzer.statExecSeed,
@@ -420,7 +423,7 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
// Execute each of such mutants to check if it gives new coverage.
p.MutateWithHints(job.call, comps,
func(p *prog.Prog) bool {
- result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+ result := fuzzer.execute(job.exec, &queue.Request{
Prog: p,
ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
Stat: fuzzer.statExecHint,
diff --git a/pkg/fuzzer/queue/queue.go b/pkg/fuzzer/queue/queue.go
index 46df9d234..051f7205e 100644
--- a/pkg/fuzzer/queue/queue.go
+++ b/pkg/fuzzer/queue/queue.go
@@ -37,7 +37,6 @@ type Request struct {
// Options needed by runtest.
BinaryFile string // If set, it's executed instead of Prog.
- Repeat int // Repeats in addition to the first run.
// Important requests will be retried even from crashed VMs.
Important bool
@@ -113,6 +112,11 @@ func (r *Request) Validate() error {
if (collectComps) && (collectSignal || collectCover) {
return fmt.Errorf("hint collection is mutually exclusive with signal/coverage")
}
+ sandboxes := flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSandboxSetuid |
+ flatrpc.ExecEnvSandboxNamespace | flatrpc.ExecEnvSandboxAndroid
+ if r.BinaryFile == "" && r.ExecOpts.EnvFlags&sandboxes == 0 {
+ return fmt.Errorf("no sandboxes set")
+ }
return nil
}
@@ -415,3 +419,24 @@ func (d *Deduplicator) onDone(req *Request, res *Result) bool {
}
return true
}
+
+// DefaultOpts applies opts to all requests in source.
+func DefaultOpts(source Source, opts flatrpc.ExecOpts) Source {
+ return &defaultOpts{source, opts}
+}
+
+type defaultOpts struct {
+ source Source
+ opts flatrpc.ExecOpts
+}
+
+func (do *defaultOpts) Next() *Request {
+ req := do.source.Next()
+ if req == nil {
+ return nil
+ }
+ req.ExecOpts.ExecFlags |= do.opts.ExecFlags
+ req.ExecOpts.EnvFlags |= do.opts.EnvFlags
+ req.ExecOpts.SandboxArg = do.opts.SandboxArg
+ return req
+}
diff --git a/pkg/host/features.go b/pkg/host/features.go
deleted file mode 100644
index 91ea0de7e..000000000
--- a/pkg/host/features.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package host
-
-import (
- "bytes"
- "fmt"
- "strings"
- "time"
-
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-// SetupFeatures enables and does any one-time setup for the requested features on the host.
-// Note: this can be called multiple times and must be idempotent.
-func SetupFeatures(target *prog.Target, executor string, mask flatrpc.Feature, flags csource.Features) (
- []*flatrpc.FeatureInfo, error) {
- if noHostChecks(target) {
- return nil, nil
- }
- var results []*flatrpc.FeatureInfo
- resultC := make(chan *flatrpc.FeatureInfo)
- for feat := range flatrpc.EnumNamesFeature {
- feat := feat
- if mask&feat == 0 {
- continue
- }
- opt := ipc.FlatRPCFeaturesToCSource[feat]
- if opt != "" && flags != nil && !flags["binfmt_misc"].Enabled {
- continue
- }
- results = append(results, nil)
- go setupFeature(executor, feat, resultC)
- }
- // Feature 0 setups common things that are not part of any feature.
- setupFeature(executor, 0, nil)
- for i := range results {
- results[i] = <-resultC
- }
- return results, nil
-}
-
-func setupFeature(executor string, feat flatrpc.Feature, resultC chan *flatrpc.FeatureInfo) {
- args := strings.Split(executor, " ")
- executor = args[0]
- args = append(args[1:], "setup", fmt.Sprint(uint64(feat)))
- output, err := osutil.RunCmd(3*time.Minute, "", executor, args...)
- log.Logf(1, "executor %v\n%s", args, bytes.ReplaceAll(output, []byte("SYZFAIL:"), nil))
- outputStr := string(output)
- if err == nil {
- outputStr = ""
- } else if outputStr == "" {
- outputStr = err.Error()
- }
- needSetup := true
- if strings.Contains(outputStr, "feature setup is not needed") {
- needSetup = false
- outputStr = ""
- }
- if resultC != nil {
- resultC <- &flatrpc.FeatureInfo{
- Id: feat,
- NeedSetup: needSetup,
- Reason: outputStr,
- }
- }
-}
-
-func noHostChecks(target *prog.Target) bool {
- // HostFuzzer targets can't run Go binaries on the targets,
- // so we actually run on the host on another OS. The same for targets.TestOS OS.
- return targets.Get(target.OS, target.Arch).HostFuzzer || target.OS == targets.TestOS
-}
diff --git a/pkg/host/machine_info.go b/pkg/host/machine_info.go
deleted file mode 100644
index c51ccf380..000000000
--- a/pkg/host/machine_info.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2020 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package host
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "github.com/google/syzkaller/pkg/flatrpc"
-)
-
-func ReadFiles(files []string) []*flatrpc.FileInfo {
- var res []*flatrpc.FileInfo
- for _, glob := range files {
- glob = filepath.FromSlash(glob)
- if !strings.Contains(glob, "*") {
- res = append(res, readFile(glob))
- continue
- }
- matches, err := filepath.Glob(glob)
- if err != nil {
- res = append(res, &flatrpc.FileInfo{
- Name: glob,
- Error: err.Error(),
- })
- continue
- }
- for _, file := range matches {
- res = append(res, readFile(file))
- }
- }
- return res
-}
-
-func readFile(file string) *flatrpc.FileInfo {
- data, err := os.ReadFile(file)
- exists, errStr := true, ""
- if err != nil {
- exists, errStr = !os.IsNotExist(err), err.Error()
- }
- return &flatrpc.FileInfo{
- Name: file,
- Exists: exists,
- Error: errStr,
- Data: data,
- }
-}
diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go
index f8ce5cb05..ff7bb9f0d 100644
--- a/pkg/instance/instance.go
+++ b/pkg/instance/instance.go
@@ -91,7 +91,7 @@ func (env *env) BuildSyzkaller(repoURL, commit string) (string, error) {
return "", fmt.Errorf("failed to checkout syzkaller repo: %w", err)
}
// The following commit ("syz-fuzzer: support optional flags") adds support for optional flags
- // in syz-fuzzer and syz-execprog. This is required to invoke older binaries with newer flags
+ // in syz-execprog. This is required to invoke older binaries with newer flags
// without failing due to unknown flags.
optionalFlags, err := repo.Contains("64435345f0891706a7e0c7885f5f7487581e6005")
if err != nil {
@@ -438,53 +438,6 @@ func (inst *inst) csourceOptions() (csource.Options, error) {
return opts, nil
}
-type OptionalFuzzerArgs struct {
- Slowdown int
- SandboxArg int64
- PprofPort int
-}
-
-type FuzzerCmdArgs struct {
- Fuzzer string
- Executor string
- Name string
- OS string
- Arch string
- FwdAddr string
- Sandbox string
- Verbosity int
- Cover bool
- Debug bool
- Optional *OptionalFuzzerArgs
-}
-
-func FuzzerCmd(args *FuzzerCmdArgs) string {
- osArg := ""
- if targets.Get(args.OS, args.Arch).HostFuzzer {
- // Only these OSes need the flag, because the rest assume host OS.
- // But speciying OS for all OSes breaks patch testing on syzbot
- // because old execprog does not have os flag.
- osArg = " -os=" + args.OS
- }
- verbosityArg := ""
- if args.Verbosity != 0 {
- verbosityArg = fmt.Sprintf(" -vv=%v", args.Verbosity)
- }
- optionalArg := ""
- if args.Optional != nil {
- flags := []tool.Flag{
- {Name: "slowdown", Value: fmt.Sprint(args.Optional.Slowdown)},
- {Name: "sandbox_arg", Value: fmt.Sprint(args.Optional.SandboxArg)},
- {Name: "pprof_port", Value: fmt.Sprint(args.Optional.PprofPort)},
- }
- optionalArg = " " + tool.OptionalFlags(flags)
- }
- return fmt.Sprintf("%v -executor=%v -name=%v -arch=%v%v -manager=%v -sandbox=%v"+
- " -cover=%v -debug=%v %v%v",
- args.Fuzzer, args.Executor, args.Name, args.Arch, osArg, args.FwdAddr, args.Sandbox,
- args.Cover, args.Debug, verbosityArg, optionalArg)
-}
-
func ExecprogCmd(execprog, executor, OS, arch, sandbox string, sandboxArg int, repeat, threaded, collide bool,
procs, faultCall, faultNth int, optionalFlags bool, slowdown int, progFile string) string {
repeatCount := 1
diff --git a/pkg/ipc/gate.go b/pkg/ipc/gate.go
deleted file mode 100644
index b1b1f1fc8..000000000
--- a/pkg/ipc/gate.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "sync"
-)
-
-// Gate limits concurrency level and window to the given value.
-// Limitation of concurrency window means that if a very old activity is still
-// running it will not let new activities to start even if concurrency level is low.
-type Gate struct {
- cv *sync.Cond
- busy []bool
- pos int
- running int
- stop bool
- f func()
-}
-
-// If f is not nil, it will be called after each batch of c activities.
-func NewGate(c int, f func()) *Gate {
- return &Gate{
- cv: sync.NewCond(new(sync.Mutex)),
- busy: make([]bool, c),
- f: f,
- }
-}
-
-func (g *Gate) Enter() int {
- g.cv.L.Lock()
- for g.busy[g.pos] || g.stop {
- g.cv.Wait()
- }
- idx := g.pos
- g.pos++
- if g.pos >= len(g.busy) {
- g.pos = 0
- }
- g.busy[idx] = true
- g.running++
- if g.running > len(g.busy) {
- panic("broken gate")
- }
- g.cv.L.Unlock()
- return idx
-}
-
-func (g *Gate) Leave(idx int) {
- g.cv.L.Lock()
- if !g.busy[idx] {
- panic("broken gate")
- }
- g.busy[idx] = false
- g.running--
- if g.running < 0 {
- panic("broken gate")
- }
- if idx == 0 && g.f != nil {
- if g.stop {
- panic("broken gate")
- }
- g.stop = true
- for g.running != 0 {
- g.cv.Wait()
- }
- g.stop = false
- g.f()
- g.cv.Broadcast()
- }
- if idx == g.pos && !g.stop || g.running == 0 && g.stop {
- g.cv.Broadcast()
- }
- g.cv.L.Unlock()
-}
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
deleted file mode 100644
index c09137e3b..000000000
--- a/pkg/ipc/ipc.go
+++ /dev/null
@@ -1,838 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "fmt"
- "io"
- "os"
- "os/exec"
- "path/filepath"
- "slices"
- "strings"
- "sync"
- "time"
- "unsafe"
-
- "github.com/google/syzkaller/pkg/cover"
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/signal"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-// Config is the configuration for Env.
-type Config struct {
- // Path to executor binary.
- Executor string
-
- UseForkServer bool // use extended protocol with handshake
- RateLimit bool // rate limit start of new processes for host fuzzer mode
-
- Timeouts targets.Timeouts
-
- CoverFilter []uint64
-}
-
-type Env struct {
- in []byte
- out []byte
-
- cmd *command
- inFile *os.File
- outFile *os.File
- bin []string
- linkedBin string
- pid int
- config *Config
-}
-
-const (
- outputSize = 16 << 20
-
- statusFail = 67
-
- // Comparison types masks taken from KCOV headers.
- compSizeMask = 6
- compSize8 = 6
- compConstMask = 1
-
- extraReplyIndex = 0xffffffff // uint32(-1)
-)
-
-func SandboxToFlags(sandbox string) (flatrpc.ExecEnv, error) {
- switch sandbox {
- case "none":
- return 0, nil
- case "setuid":
- return flatrpc.ExecEnvSandboxSetuid, nil
- case "namespace":
- return flatrpc.ExecEnvSandboxNamespace, nil
- case "android":
- return flatrpc.ExecEnvSandboxAndroid, nil
- default:
- return 0, fmt.Errorf("sandbox must contain one of none/setuid/namespace/android")
- }
-}
-
-func FlagsToSandbox(flags flatrpc.ExecEnv) string {
- if flags&flatrpc.ExecEnvSandboxSetuid != 0 {
- return "setuid"
- } else if flags&flatrpc.ExecEnvSandboxNamespace != 0 {
- return "namespace"
- } else if flags&flatrpc.ExecEnvSandboxAndroid != 0 {
- return "android"
- }
- return "none"
-}
-
-func FeaturesToFlags(features flatrpc.Feature, manual csource.Features) flatrpc.ExecEnv {
- for feat := range flatrpc.EnumNamesFeature {
- opt := FlatRPCFeaturesToCSource[feat]
- if opt != "" && manual != nil && !manual[opt].Enabled {
- features &= ^feat
- }
- }
- var flags flatrpc.ExecEnv
- if manual == nil || manual["net_reset"].Enabled {
- flags |= flatrpc.ExecEnvEnableNetReset
- }
- if manual == nil || manual["cgroups"].Enabled {
- flags |= flatrpc.ExecEnvEnableCgroups
- }
- if manual == nil || manual["close_fds"].Enabled {
- flags |= flatrpc.ExecEnvEnableCloseFds
- }
- if features&flatrpc.FeatureExtraCoverage != 0 {
- flags |= flatrpc.ExecEnvExtraCover
- }
- if features&flatrpc.FeatureDelayKcovMmap != 0 {
- flags |= flatrpc.ExecEnvDelayKcovMmap
- }
- if features&flatrpc.FeatureNetInjection != 0 {
- flags |= flatrpc.ExecEnvEnableTun
- }
- if features&flatrpc.FeatureNetDevices != 0 {
- flags |= flatrpc.ExecEnvEnableNetDev
- }
- if features&flatrpc.FeatureDevlinkPCI != 0 {
- flags |= flatrpc.ExecEnvEnableDevlinkPCI
- }
- if features&flatrpc.FeatureNicVF != 0 {
- flags |= flatrpc.ExecEnvEnableNicVF
- }
- if features&flatrpc.FeatureVhciInjection != 0 {
- flags |= flatrpc.ExecEnvEnableVhciInjection
- }
- if features&flatrpc.FeatureWifiEmulation != 0 {
- flags |= flatrpc.ExecEnvEnableWifi
- }
- return flags
-}
-
-var FlatRPCFeaturesToCSource = map[flatrpc.Feature]string{
- flatrpc.FeatureNetInjection: "tun",
- flatrpc.FeatureNetDevices: "net_dev",
- flatrpc.FeatureDevlinkPCI: "devlink_pci",
- flatrpc.FeatureNicVF: "nic_vf",
- flatrpc.FeatureVhciInjection: "vhci",
- flatrpc.FeatureWifiEmulation: "wifi",
- flatrpc.FeatureUSBEmulation: "usb",
- flatrpc.FeatureBinFmtMisc: "binfmt_misc",
- flatrpc.FeatureLRWPANEmulation: "ieee802154",
- flatrpc.FeatureSwap: "swap",
-}
-
-func MakeEnv(config *Config, pid int) (*Env, error) {
- if config.Timeouts.Slowdown == 0 || config.Timeouts.Scale == 0 ||
- config.Timeouts.Syscall == 0 || config.Timeouts.Program == 0 {
- return nil, fmt.Errorf("ipc.MakeEnv: uninitialized timeouts (%+v)", config.Timeouts)
- }
- var inf, outf *os.File
- var inmem, outmem []byte
- var err error
- inf, inmem, err = osutil.CreateMemMappedFile(prog.ExecBufferSize)
- if err != nil {
- return nil, err
- }
- defer func() {
- if inf != nil {
- osutil.CloseMemMappedFile(inf, inmem)
- }
- }()
- outf, outmem, err = osutil.CreateMemMappedFile(outputSize)
- if err != nil {
- return nil, err
- }
- defer func() {
- if outf != nil {
- osutil.CloseMemMappedFile(outf, outmem)
- }
- }()
- env := &Env{
- in: inmem,
- out: outmem,
- inFile: inf,
- outFile: outf,
- bin: append(strings.Split(config.Executor, " "), "exec"),
- pid: pid,
- config: config,
- }
- if len(env.bin) == 0 {
- return nil, fmt.Errorf("binary is empty string")
- }
- env.bin[0] = osutil.Abs(env.bin[0]) // we are going to chdir
- // Append pid to binary name.
- // E.g. if binary is 'syz-executor' and pid=15,
- // we create a link from 'syz-executor.15' to 'syz-executor' and use 'syz-executor.15' as binary.
- // This allows to easily identify program that lead to a crash in the log.
- // Log contains pid in "executing program 15" and crashes usually contain "Comm: syz-executor.15".
- // Note: pkg/report knowns about this and converts "syz-executor.15" back to "syz-executor".
- base := filepath.Base(env.bin[0])
- pidStr := fmt.Sprintf(".%v", pid)
- const maxLen = 16 // TASK_COMM_LEN is currently set to 16
- if len(base)+len(pidStr) >= maxLen {
- // Remove beginning of file name, in tests temp files have unique numbers at the end.
- base = base[len(base)+len(pidStr)-maxLen+1:]
- }
- binCopy := filepath.Join(filepath.Dir(env.bin[0]), base+pidStr)
- if err := os.Link(env.bin[0], binCopy); err == nil {
- env.bin[0] = binCopy
- env.linkedBin = binCopy
- }
- inf = nil
- outf = nil
- return env, nil
-}
-
-func (env *Env) Close() error {
- if env.cmd != nil {
- env.cmd.close()
- }
- if env.linkedBin != "" {
- os.Remove(env.linkedBin)
- }
- var err1, err2 error
- if env.inFile != nil {
- err1 = osutil.CloseMemMappedFile(env.inFile, env.in)
- }
- if env.outFile != nil {
- err2 = osutil.CloseMemMappedFile(env.outFile, env.out)
- }
- switch {
- case err1 != nil:
- return err1
- case err2 != nil:
- return err2
- default:
- return nil
- }
-}
-
-// Exec starts executor binary to execute program stored in progData in exec encoding
-// and returns information about the execution:
-// output: process output
-// info: per-call info
-// hanged: program hanged and was killed
-// err0: failed to start the process or bug in executor itself.
-func (env *Env) ExecProg(opts *flatrpc.ExecOpts, progData []byte) (
- output []byte, info *flatrpc.ProgInfo, hanged bool, err0 error) {
- ncalls, err := prog.ExecCallCount(progData)
- if err != nil {
- err0 = err
- return
- }
- // Copy-in serialized program.
- copy(env.in, progData)
- // Zero out the first two words (ncmd and nsig), so that we don't have garbage there
- // if executor crashes before writing non-garbage there.
- for i := 0; i < 4; i++ {
- env.out[i] = 0
- }
-
- err0 = env.RestartIfNeeded(opts)
- if err0 != nil {
- return
- }
-
- start := osutil.MonotonicNano()
- output, hanged, err0 = env.cmd.exec(opts)
- elapsed := osutil.MonotonicNano() - start
- if err0 != nil {
- env.cmd.close()
- env.cmd = nil
- return
- }
-
- info, err0 = env.parseOutput(opts, ncalls)
- if info != nil {
- info.Elapsed = uint64(elapsed)
- info.Freshness = env.cmd.freshness
- }
- env.cmd.freshness++
- if !env.config.UseForkServer {
- env.cmd.close()
- env.cmd = nil
- }
- return
-}
-
-func (env *Env) Exec(opts *flatrpc.ExecOpts, p *prog.Prog) (
- output []byte, info *flatrpc.ProgInfo, hanged bool, err0 error) {
- progData, err := p.SerializeForExec()
- if err != nil {
- err0 = err
- return
- }
- return env.ExecProg(opts, progData)
-}
-
-func (env *Env) ForceRestart() {
- if env.cmd != nil {
- env.cmd.close()
- env.cmd = nil
- }
-}
-
-// RestartIfNeeded brings up an executor process if it was stopped.
-func (env *Env) RestartIfNeeded(opts *flatrpc.ExecOpts) error {
- if env.cmd != nil {
- if env.cmd.flags == opts.EnvFlags && env.cmd.sandboxArg == opts.SandboxArg {
- return nil
- }
- env.ForceRestart()
- }
- if env.config.RateLimit {
- rateLimiterOnce.Do(func() {
- rateLimiter = time.NewTicker(1 * time.Second).C
- })
- <-rateLimiter
- }
- var err error
- env.cmd, err = env.makeCommand(opts, "./")
- return err
-}
-
-var (
- rateLimiterOnce sync.Once
- rateLimiter <-chan time.Time
-)
-
-func (env *Env) parseOutput(opts *flatrpc.ExecOpts, ncalls int) (*flatrpc.ProgInfo, error) {
- out := env.out
- ncmd, ok := readUint32(&out)
- if !ok {
- return nil, fmt.Errorf("failed to read number of calls")
- }
- info := flatrpc.EmptyProgInfo(ncalls)
- extraParts := make([]flatrpc.CallInfo, 0)
- for i := uint32(0); i < ncmd; i++ {
- if len(out) < int(unsafe.Sizeof(callReply{})) {
- return nil, fmt.Errorf("failed to read call %v reply", i)
- }
- reply := *(*callReply)(unsafe.Pointer(&out[0]))
- out = out[unsafe.Sizeof(callReply{}):]
- var inf *flatrpc.CallInfo
- if reply.magic != outMagic {
- return nil, fmt.Errorf("bad reply magic 0x%x", reply.magic)
- }
- if reply.index != extraReplyIndex {
- if int(reply.index) >= len(info.Calls) {
- return nil, fmt.Errorf("bad call %v index %v/%v", i, reply.index, len(info.Calls))
- }
- inf = info.Calls[reply.index]
- if inf.Flags != 0 || inf.Signal != nil {
- return nil, fmt.Errorf("duplicate reply for call %v/%v/%v", i, reply.index, reply.num)
- }
- inf.Error = int32(reply.errno)
- inf.Flags = flatrpc.CallFlag(reply.flags)
- } else {
- extraParts = append(extraParts, flatrpc.CallInfo{})
- inf = &extraParts[len(extraParts)-1]
- }
- if inf.Signal, ok = readUint64Array(&out, reply.signalSize); !ok {
- return nil, fmt.Errorf("call %v/%v/%v: signal overflow: %v/%v",
- i, reply.index, reply.num, reply.signalSize, len(out))
- }
- if inf.Cover, ok = readUint64Array(&out, reply.coverSize); !ok {
- return nil, fmt.Errorf("call %v/%v/%v: cover overflow: %v/%v",
- i, reply.index, reply.num, reply.coverSize, len(out))
- }
- comps, err := readComps(&out, reply.compsSize)
- if err != nil {
- return nil, err
- }
- inf.Comps = comps
- }
- if len(extraParts) == 0 {
- return info, nil
- }
- info.Extra = convertExtra(extraParts, opts.ExecFlags&flatrpc.ExecFlagDedupCover != 0)
- return info, nil
-}
-
-func convertExtra(extraParts []flatrpc.CallInfo, dedupCover bool) *flatrpc.CallInfo {
- var extra flatrpc.CallInfo
- if dedupCover {
- extraCover := make(cover.Cover)
- for _, part := range extraParts {
- extraCover.Merge(part.Cover)
- }
- extra.Cover = extraCover.Serialize()
- } else {
- for _, part := range extraParts {
- extra.Cover = append(extra.Cover, part.Cover...)
- }
- }
- extraSignal := make(signal.Signal)
- for _, part := range extraParts {
- extraSignal.Merge(signal.FromRaw(part.Signal, 0))
- }
- extra.Signal = make([]uint64, len(extraSignal))
- i := 0
- for s := range extraSignal {
- extra.Signal[i] = uint64(s)
- i++
- }
- return &extra
-}
-
-func readComps(outp *[]byte, compsSize uint32) ([]*flatrpc.Comparison, error) {
- comps := make([]*flatrpc.Comparison, 0, 2*compsSize)
- for i := uint32(0); i < compsSize; i++ {
- typ, ok := readUint32(outp)
- if !ok {
- return nil, fmt.Errorf("failed to read comp %v", i)
- }
- if typ > compConstMask|compSizeMask {
- return nil, fmt.Errorf("bad comp %v type %v", i, typ)
- }
- var op1, op2 uint64
- var ok1, ok2 bool
- if typ&compSizeMask == compSize8 {
- op1, ok1 = readUint64(outp)
- op2, ok2 = readUint64(outp)
- } else {
- var tmp1, tmp2 uint32
- tmp1, ok1 = readUint32(outp)
- tmp2, ok2 = readUint32(outp)
- op1, op2 = uint64(int64(int32(tmp1))), uint64(int64(int32(tmp2)))
- }
- if !ok1 || !ok2 {
- return nil, fmt.Errorf("failed to read comp %v op", i)
- }
- if op1 == op2 {
- continue // it's useless to store such comparisons
- }
- comps = append(comps, &flatrpc.Comparison{Op1: op2, Op2: op1})
- if (typ & compConstMask) != 0 {
- // If one of the operands was const, then this operand is always
- // placed first in the instrumented callbacks. Such an operand
- // could not be an argument of our syscalls (because otherwise
- // it wouldn't be const), thus we simply ignore it.
- continue
- }
- comps = append(comps, &flatrpc.Comparison{Op1: op1, Op2: op2})
- }
- return comps, nil
-}
-
-func readUint32(outp *[]byte) (uint32, bool) {
- out := *outp
- if len(out) < 4 {
- return 0, false
- }
- v := prog.HostEndian.Uint32(out)
- *outp = out[4:]
- return v, true
-}
-
-func readUint64(outp *[]byte) (uint64, bool) {
- out := *outp
- if len(out) < 8 {
- return 0, false
- }
- v := prog.HostEndian.Uint64(out)
- *outp = out[8:]
- return v, true
-}
-
-func readUint64Array(outp *[]byte, size uint32) ([]uint64, bool) {
- if size == 0 {
- return nil, true
- }
- out := *outp
- dataSize := int(size * 8)
- if dataSize > len(out) {
- return nil, false
- }
- res := unsafe.Slice((*uint64)(unsafe.Pointer(&out[0])), size)
- *outp = out[dataSize:]
- // Detach the resulting array from the original data.
- return slices.Clone(res), true
-}
-
-type command struct {
- pid int
- config *Config
- flags flatrpc.ExecEnv
- sandboxArg int64
- timeout time.Duration
- cmd *exec.Cmd
- dir string
- readDone chan []byte
- exited chan error
- inrp *os.File
- outwp *os.File
- outmem []byte
- freshness uint64
-}
-
-const (
- inMagic = uint64(0xbadc0ffeebadface)
- outMagic = uint32(0xbadf00d)
-)
-
-type handshakeReq struct {
- magic uint64
- flags uint64 // env flags
- pid uint64
- sandboxArg uint64
- coverFilterSize uint64
- // Followed by [coverFilterSize]uint64 filter.
-}
-
-type handshakeReply struct {
- magic uint32
-}
-
-type executeReq struct {
- magic uint64
- envFlags uint64 // env flags
- execFlags uint64 // exec flags
- pid uint64
- syscallTimeoutMS uint64
- programTimeoutMS uint64
- slowdownScale uint64
-}
-
-type executeReply struct {
- magic uint32
- // If done is 0, then this is call completion message followed by callReply.
- // If done is 1, then program execution is finished and status is set.
- done uint32
- status uint32
-}
-
-type callReply struct {
- magic uint32
- index uint32 // call index in the program
- num uint32 // syscall number (for cross-checking)
- errno uint32
- flags uint32 // see CallFlags
- signalSize uint32
- coverSize uint32
- compsSize uint32
- // signal/cover/comps follow
-}
-
-func (env *Env) makeCommand(opts *flatrpc.ExecOpts, tmpDir string) (*command, error) {
- dir, err := os.MkdirTemp(tmpDir, "syzkaller-testdir")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp dir: %w", err)
- }
- dir = osutil.Abs(dir)
-
- timeout := env.config.Timeouts.Program
- if env.config.UseForkServer {
- // Executor has an internal timeout and protects against most hangs when fork server is enabled,
- // so we use quite large timeout. Executor can be slow due to global locks in namespaces
- // and other things, so let's better wait than report false misleading crashes.
- timeout *= 5
- }
-
- c := &command{
- pid: env.pid,
- config: env.config,
- flags: opts.EnvFlags,
- sandboxArg: opts.SandboxArg,
- timeout: timeout,
- dir: dir,
- outmem: env.out,
- }
- defer func() {
- if c != nil {
- c.close()
- }
- }()
-
- if err := os.Chmod(dir, 0777); err != nil {
- return nil, fmt.Errorf("failed to chmod temp dir: %w", err)
- }
-
- // Output capture pipe.
- rp, wp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer wp.Close()
-
- // executor->ipc command pipe.
- inrp, inwp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer inwp.Close()
- c.inrp = inrp
-
- // ipc->executor command pipe.
- outrp, outwp, err := os.Pipe()
- if err != nil {
- return nil, fmt.Errorf("failed to create pipe: %w", err)
- }
- defer outrp.Close()
- c.outwp = outwp
-
- c.readDone = make(chan []byte, 1)
-
- cmd := osutil.Command(env.bin[0], env.bin[1:]...)
- if env.inFile != nil && env.outFile != nil {
- cmd.ExtraFiles = []*os.File{env.inFile, env.outFile}
- }
- cmd.Dir = dir
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- cmd.Stdin = outrp
- cmd.Stdout = inwp
- if c.flags&flatrpc.ExecEnvDebug != 0 {
- close(c.readDone)
- cmd.Stderr = os.Stdout
- } else {
- cmd.Stderr = wp
- go func(c *command) {
- // Read out output in case executor constantly prints something.
- const bufSize = 128 << 10
- output := make([]byte, bufSize)
- var size uint64
- for {
- n, err := rp.Read(output[size:])
- if n > 0 {
- size += uint64(n)
- if size >= bufSize*3/4 {
- copy(output, output[size-bufSize/2:size])
- size = bufSize / 2
- }
- }
- if err != nil {
- rp.Close()
- c.readDone <- output[:size]
- close(c.readDone)
- return
- }
- }
- }(c)
- }
- if err := cmd.Start(); err != nil {
- return nil, fmt.Errorf("failed to start executor binary: %w", err)
- }
- c.exited = make(chan error, 1)
- c.cmd = cmd
- go func(c *command) {
- err := c.cmd.Wait()
- c.exited <- err
- close(c.exited)
- // Avoid a livelock if cmd.Stderr has been leaked to another alive process.
- rp.SetDeadline(time.Now().Add(5 * time.Second))
- }(c)
- wp.Close()
- // Note: we explicitly close inwp before calling handshake even though we defer it above.
- // If we don't do it and executor exits before writing handshake reply,
- // reading from inrp will hang since we hold another end of the pipe open.
- inwp.Close()
-
- if c.config.UseForkServer {
- if err := c.handshake(); err != nil {
- return nil, err
- }
- }
- tmp := c
- c = nil // disable defer above
- return tmp, nil
-}
-
-func (c *command) close() {
- if c.cmd != nil {
- c.cmd.Process.Kill()
- c.wait()
- }
- osutil.RemoveAll(c.dir)
- if c.inrp != nil {
- c.inrp.Close()
- }
- if c.outwp != nil {
- c.outwp.Close()
- }
-}
-
-// handshake sends handshakeReq and waits for handshakeReply.
-func (c *command) handshake() error {
- req := &handshakeReq{
- magic: inMagic,
- flags: uint64(c.flags),
- pid: uint64(c.pid),
- sandboxArg: uint64(c.sandboxArg),
- coverFilterSize: uint64(len(c.config.CoverFilter)),
- }
- reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:]
- if _, err := c.outwp.Write(reqData); err != nil {
- return c.handshakeError(fmt.Errorf("failed to write control pipe: %w", err))
- }
- if req.coverFilterSize != 0 {
- ptr := (*byte)(unsafe.Pointer(&c.config.CoverFilter[0]))
- size := uintptr(req.coverFilterSize) * unsafe.Sizeof(c.config.CoverFilter[0])
- coverFilter := unsafe.Slice(ptr, size)
- if _, err := c.outwp.Write(coverFilter); err != nil {
- return c.handshakeError(fmt.Errorf("failed to write control pipe: %w", err))
- }
- }
-
- read := make(chan error, 1)
- go func() {
- reply := &handshakeReply{}
- replyData := (*[unsafe.Sizeof(*reply)]byte)(unsafe.Pointer(reply))[:]
- if _, err := io.ReadFull(c.inrp, replyData); err != nil {
- read <- err
- return
- }
- if reply.magic != outMagic {
- read <- fmt.Errorf("bad handshake reply magic 0x%x", reply.magic)
- return
- }
- read <- nil
- }()
- // Sandbox setup can take significant time.
- timeout := time.NewTimer(time.Minute * c.config.Timeouts.Scale)
- select {
- case err := <-read:
- timeout.Stop()
- if err != nil {
- return c.handshakeError(err)
- }
- return nil
- case <-timeout.C:
- return c.handshakeError(fmt.Errorf("not serving"))
- }
-}
-
-func (c *command) handshakeError(err error) error {
- c.cmd.Process.Kill()
- output := <-c.readDone
- err = fmt.Errorf("executor %v: %w\n%s", c.pid, err, output)
- c.wait()
- return err
-}
-
-func (c *command) wait() error {
- return <-c.exited
-}
-
-func (c *command) exec(opts *flatrpc.ExecOpts) (output []byte, hanged bool, err0 error) {
- if c.flags != opts.EnvFlags || c.sandboxArg != opts.SandboxArg {
- panic("wrong command")
- }
- req := &executeReq{
- magic: inMagic,
- envFlags: uint64(c.flags),
- execFlags: uint64(opts.ExecFlags),
- pid: uint64(c.pid),
- syscallTimeoutMS: uint64(c.config.Timeouts.Syscall / time.Millisecond),
- programTimeoutMS: uint64(c.config.Timeouts.Program / time.Millisecond),
- slowdownScale: uint64(c.config.Timeouts.Scale),
- }
- reqData := (*[unsafe.Sizeof(*req)]byte)(unsafe.Pointer(req))[:]
- if _, err := c.outwp.Write(reqData); err != nil {
- output = <-c.readDone
- err0 = fmt.Errorf("executor %v: failed to write control pipe: %w", c.pid, err)
- return
- }
- // At this point program is executing.
-
- done := make(chan bool)
- hang := make(chan bool)
- go func() {
- t := time.NewTimer(c.timeout)
- select {
- case <-t.C:
- c.cmd.Process.Kill()
- hang <- true
- case <-done:
- t.Stop()
- hang <- false
- }
- }()
- exitStatus := -1
- completedCalls := (*uint32)(unsafe.Pointer(&c.outmem[0]))
- outmem := c.outmem[4:]
- for {
- reply := &executeReply{}
- replyData := (*[unsafe.Sizeof(*reply)]byte)(unsafe.Pointer(reply))[:]
- if _, err := io.ReadFull(c.inrp, replyData); err != nil {
- break
- }
- if reply.magic != outMagic {
- fmt.Fprintf(os.Stderr, "executor %v: got bad reply magic 0x%x\n", c.pid, reply.magic)
- os.Exit(1)
- }
- if reply.done != 0 {
- exitStatus = int(reply.status)
- break
- }
- callReply := &callReply{}
- callReplyData := (*[unsafe.Sizeof(*callReply)]byte)(unsafe.Pointer(callReply))[:]
- if _, err := io.ReadFull(c.inrp, callReplyData); err != nil {
- break
- }
- if callReply.signalSize != 0 || callReply.coverSize != 0 || callReply.compsSize != 0 {
- // This is unsupported yet.
- fmt.Fprintf(os.Stderr, "executor %v: got call reply with coverage\n", c.pid)
- os.Exit(1)
- }
- copy(outmem, callReplyData)
- outmem = outmem[len(callReplyData):]
- *completedCalls++
- }
- close(done)
- if exitStatus == 0 {
- // Program was OK.
- <-hang
- return
- }
- c.cmd.Process.Kill()
- output = <-c.readDone
- err := c.wait()
- if err != nil {
- output = append(output, err.Error()...)
- output = append(output, '\n')
- }
- if <-hang {
- hanged = true
- return
- }
- if exitStatus == -1 {
- if c.cmd.ProcessState == nil {
- exitStatus = statusFail
- } else {
- exitStatus = osutil.ProcessExitStatus(c.cmd.ProcessState)
- }
- }
- // Ignore all other errors.
- // Without fork server executor can legitimately exit (program contains exit_group),
- // with fork server the top process can exit with statusFail if it wants special handling.
- if exitStatus == statusFail {
- err0 = fmt.Errorf("executor %v: exit status %d err %w\n%s", c.pid, exitStatus, err, output)
- }
- return
-}
diff --git a/pkg/ipc/ipc_priv_test.go b/pkg/ipc/ipc_priv_test.go
deleted file mode 100644
index 02c467daf..000000000
--- a/pkg/ipc/ipc_priv_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2022 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc
-
-import (
- "testing"
-
- "github.com/google/syzkaller/pkg/flatrpc"
-)
-
-func TestOutputDeadline(t *testing.T) {
- // Run the command that leaks stderr to a child process.
- env := &Env{
- bin: []string{
- "sh",
- "-c",
- "exec 1>&2; ( sleep 100; echo fail ) & echo done",
- },
- pid: 1,
- config: &Config{},
- }
- c, err := env.makeCommand(&flatrpc.ExecOpts{}, t.TempDir())
- if err != nil {
- t.Fatal(err)
- }
- c.wait()
- out := <-c.readDone
- if string(out) != "done\n" {
- t.Errorf("unexpected output: '%s'", out)
- }
-}
diff --git a/pkg/ipc/ipc_test.go b/pkg/ipc/ipc_test.go
deleted file mode 100644
index c70bfe79c..000000000
--- a/pkg/ipc/ipc_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipc_test
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "runtime"
- "testing"
- "time"
-
- "github.com/google/syzkaller/pkg/csource"
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/image"
- . "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/ipc/ipcconfig"
- "github.com/google/syzkaller/pkg/osutil"
- "github.com/google/syzkaller/pkg/testutil"
- "github.com/google/syzkaller/prog"
- _ "github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/sys/targets"
-)
-
-func initTest(t *testing.T) (*prog.Target, rand.Source, int, bool, targets.Timeouts) {
- t.Parallel()
- iters := 100
- if testing.Short() {
- iters = 10
- }
- target, err := prog.GetTarget(runtime.GOOS, runtime.GOARCH)
- if err != nil {
- t.Fatal(err)
- }
- cfg, _, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- rs := testutil.RandSource(t)
- return target, rs, iters, cfg.UseForkServer, cfg.Timeouts
-}
-
-// TestExecutor runs all internal executor unit tests.
-// We do it here because we already build executor binary here.
-func TestExecutor(t *testing.T) {
- t.Parallel()
- for _, sysTarget := range targets.List[runtime.GOOS] {
- sysTarget := targets.Get(runtime.GOOS, sysTarget.Arch)
- t.Run(sysTarget.Arch, func(t *testing.T) {
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- t.Parallel()
- target, err := prog.GetTarget(runtime.GOOS, sysTarget.Arch)
- if err != nil {
- t.Fatal(err)
- }
- bin := csource.BuildExecutor(t, target, "../..")
- // qemu-user may allow us to run some cross-arch binaries.
- if _, err := osutil.RunCmd(time.Minute, "", bin, "test"); err != nil {
- if sysTarget.Arch == runtime.GOARCH || sysTarget.VMArch == runtime.GOARCH {
- t.Fatal(err)
- }
- t.Skipf("skipping, cross-arch binary failed: %v", err)
- }
- })
- }
-}
-
-func prepareTestProgram(target *prog.Target) *prog.Prog {
- p := target.DataMmapProg()
- if len(p.Calls) > 1 {
- p.Calls[1].Props.Async = true
- }
- return p
-}
-
-func TestExecute(t *testing.T) {
- target, _, _, useForkServer, timeouts := initTest(t)
-
- bin := csource.BuildExecutor(t, target, "../..")
-
- flags := []flatrpc.ExecFlag{0, flatrpc.ExecFlagThreaded}
- for _, flag := range flags {
- t.Logf("testing flags 0x%x", flag)
- cfg := &Config{
- Executor: bin,
- UseForkServer: useForkServer,
- Timeouts: timeouts,
- }
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
-
- for i := 0; i < 10; i++ {
- p := prepareTestProgram(target)
- opts := &flatrpc.ExecOpts{
- ExecFlags: flag,
- }
- output, info, hanged, err := env.Exec(opts, p)
- if err != nil {
- t.Fatalf("failed to run executor: %v", err)
- }
- if hanged {
- t.Fatalf("program hanged:\n%s", output)
- }
- if len(info.Calls) != len(p.Calls) {
- t.Fatalf("executed less calls (%v) than prog len(%v):\n%s", len(info.Calls), len(p.Calls), output)
- }
- if info.Calls[0].Error != 0 {
- t.Fatalf("simple call failed: %v\n%s", info.Calls[0].Error, output)
- }
- if len(output) != 0 {
- t.Fatalf("output on empty program")
- }
- }
- }
-}
-
-func TestParallel(t *testing.T) {
- target, _, _, useForkServer, timeouts := initTest(t)
- bin := csource.BuildExecutor(t, target, "../..")
- cfg := &Config{
- Executor: bin,
- UseForkServer: useForkServer,
- Timeouts: timeouts,
- }
- const P = 10
- errs := make(chan error, P)
- for p := 0; p < P; p++ {
- p := p
- go func() {
- env, err := MakeEnv(cfg, p)
- if err != nil {
- errs <- fmt.Errorf("failed to create env: %w", err)
- return
- }
- defer func() {
- env.Close()
- errs <- err
- }()
- p := target.DataMmapProg()
- opts := &flatrpc.ExecOpts{}
- output, info, hanged, err := env.Exec(opts, p)
- if err != nil {
- err = fmt.Errorf("failed to run executor: %w", err)
- return
- }
- if hanged {
- err = fmt.Errorf("program hanged:\n%s", output)
- return
- }
- if len(info.Calls) == 0 {
- err = fmt.Errorf("no calls executed:\n%s", output)
- return
- }
- if info.Calls[0].Error != 0 {
- err = fmt.Errorf("simple call failed: %v\n%s", info.Calls[0].Error, output)
- return
- }
- if len(output) != 0 {
- err = fmt.Errorf("output on empty program")
- return
- }
- }()
- }
- for p := 0; p < P; p++ {
- if err := <-errs; err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestZlib(t *testing.T) {
- t.Parallel()
- target, err := prog.GetTarget(targets.TestOS, targets.TestArch64)
- if err != nil {
- t.Fatal(err)
- }
- sysTarget := targets.Get(target.OS, target.Arch)
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- cfg, opts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- cfg.Executor = csource.BuildExecutor(t, target, "../..")
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
- r := rand.New(testutil.RandSource(t))
- for i := 0; i < 10; i++ {
- data := testutil.RandMountImage(r)
- compressed := image.Compress(data)
- text := fmt.Sprintf(`syz_compare_zlib(&(0x7f0000000000)="$%s", AUTO, &(0x7f0000800000)="$%s", AUTO)`,
- image.EncodeB64(data), image.EncodeB64(compressed))
- p, err := target.Deserialize([]byte(text), prog.Strict)
- if err != nil {
- t.Fatalf("failed to deserialize empty program: %v", err)
- }
- output, info, _, err := env.Exec(opts, p)
- if err != nil {
- t.Fatalf("failed to run executor: %v", err)
- }
- if info.Calls[0].Error != 0 {
- t.Fatalf("data comparison failed: %v\n%s", info.Calls[0].Error, output)
- }
- }
-}
-
-func TestExecutorCommonExt(t *testing.T) {
- target, err := prog.GetTarget("test", "64_fork")
- if err != nil {
- t.Fatal(err)
- }
- sysTarget := targets.Get(target.OS, target.Arch)
- if sysTarget.BrokenCompiler != "" {
- t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
- }
- bin := csource.BuildExecutor(t, target, "../..", "-DSYZ_TEST_COMMON_EXT_EXAMPLE=1")
- out, err := osutil.RunCmd(time.Minute, "", bin, "setup", "0")
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Contains(out, []byte("example setup_ext called")) {
- t.Fatalf("setup_ext wasn't called:\n%s", out)
- }
-
- // The example setup_ext_test does:
- // *(uint64*)(SYZ_DATA_OFFSET + 0x1234) = 0xbadc0ffee;
- // The following program tests that that value is present at 0x1234.
- test := `syz_compare(&(0x7f0000001234)="", 0x8, &(0x7f0000000000)=@blob="eeffc0ad0b000000", AUTO)`
- p, err := target.Deserialize([]byte(test), prog.Strict)
- if err != nil {
- t.Fatal(err)
- }
- cfg, opts, err := ipcconfig.Default(target)
- if err != nil {
- t.Fatal(err)
- }
- cfg.Executor = bin
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- env, err := MakeEnv(cfg, 0)
- if err != nil {
- t.Fatalf("failed to create env: %v", err)
- }
- defer env.Close()
- _, info, _, err := env.Exec(opts, p)
- if err != nil {
- t.Fatal(err)
- }
- if call := info.Calls[0]; call.Flags&flatrpc.CallFlagFinished == 0 || call.Error != 0 {
- t.Fatalf("bad call result: flags=%x errno=%v", call.Flags, call.Error)
- }
-}
diff --git a/pkg/ipc/ipcconfig/ipcconfig.go b/pkg/ipc/ipcconfig/ipcconfig.go
deleted file mode 100644
index aef709a23..000000000
--- a/pkg/ipc/ipcconfig/ipcconfig.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package ipcconfig
-
-import (
- "flag"
-
- "github.com/google/syzkaller/pkg/flatrpc"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/prog"
- "github.com/google/syzkaller/sys/targets"
-)
-
-var (
- flagExecutor = flag.String("executor", "./syz-executor", "path to executor binary")
- flagThreaded = flag.Bool("threaded", true, "use threaded mode in executor")
- flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)")
- flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)")
- flagSandboxArg = flag.Int("sandbox_arg", 0, "argument for sandbox runner to adjust it via config")
- flagDebug = flag.Bool("debug", false, "debug output from executor")
- flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
-)
-
-func Default(target *prog.Target) (*ipc.Config, *flatrpc.ExecOpts, error) {
- sysTarget := targets.Get(target.OS, target.Arch)
- c := &ipc.Config{
- Executor: *flagExecutor,
- Timeouts: sysTarget.Timeouts(*flagSlowdown),
- }
- c.UseForkServer = sysTarget.ExecutorUsesForkServer
- c.RateLimit = sysTarget.HostFuzzer && target.OS != targets.TestOS
-
- opts := &flatrpc.ExecOpts{
- ExecFlags: flatrpc.ExecFlagDedupCover,
- }
- if *flagThreaded {
- opts.ExecFlags |= flatrpc.ExecFlagThreaded
- }
- if *flagSignal {
- opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
- }
- if *flagSignal {
- opts.EnvFlags |= flatrpc.ExecEnvSignal
- }
- if *flagDebug {
- opts.EnvFlags |= flatrpc.ExecEnvDebug
- }
- sandboxFlags, err := ipc.SandboxToFlags(*flagSandbox)
- if err != nil {
- return nil, nil, err
- }
- opts.SandboxArg = int64(*flagSandboxArg)
- opts.EnvFlags |= sandboxFlags
- return c, opts, nil
-}
diff --git a/pkg/mgrconfig/load.go b/pkg/mgrconfig/load.go
index 2bccdc7df..ba446ffa3 100644
--- a/pkg/mgrconfig/load.go
+++ b/pkg/mgrconfig/load.go
@@ -29,7 +29,6 @@ type Derived struct {
TargetVMArch string
// Full paths to binaries we are going to use:
- FuzzerBin string
ExecprogBin string
ExecutorBin string
@@ -39,7 +38,7 @@ type Derived struct {
// Special debugging/development mode specified by VM type "none".
// In this mode syz-manager does not start any VMs, but instead a user is supposed
- // to start syz-fuzzer process in a VM manually.
+ // to start syz-executor process in a VM manually.
VMLess bool
}
@@ -263,16 +262,12 @@ func (cfg *Config) completeBinaries() error {
targetBin := func(name, arch string) string {
return filepath.Join(cfg.Syzkaller, "bin", cfg.TargetOS+"_"+arch, name+exe)
}
- cfg.FuzzerBin = targetBin("syz-fuzzer", cfg.TargetVMArch)
cfg.ExecprogBin = targetBin("syz-execprog", cfg.TargetVMArch)
cfg.ExecutorBin = targetBin("syz-executor", cfg.TargetArch)
// If the target already provides an executor binary, we don't need to copy it.
if cfg.SysTarget.ExecutorBin != "" {
cfg.ExecutorBin = ""
}
- if !osutil.IsExist(cfg.FuzzerBin) {
- return fmt.Errorf("bad config syzkaller param: can't find %v", cfg.FuzzerBin)
- }
if !osutil.IsExist(cfg.ExecprogBin) {
return fmt.Errorf("bad config syzkaller param: can't find %v", cfg.ExecprogBin)
}
diff --git a/pkg/report/fuchsia.go b/pkg/report/fuchsia.go
index e5c2d72d1..51d200251 100644
--- a/pkg/report/fuchsia.go
+++ b/pkg/report/fuchsia.go
@@ -54,7 +54,7 @@ func ctorFuchsia(cfg *config) (reporterImpl, []string, error) {
ctx.obj = filepath.Join(ctx.kernelObj, ctx.target.KernelObject)
}
suppressions := []string{
- "fatal exception: process /tmp/syz-fuzzer", // OOM presumably
+ "fatal exception: process /tmp/syz-executor", // OOM presumably
}
return ctx, suppressions, nil
}
diff --git a/pkg/report/linux.go b/pkg/report/linux.go
index acf0d6f79..26cb5461f 100644
--- a/pkg/report/linux.go
+++ b/pkg/report/linux.go
@@ -126,11 +126,11 @@ func ctorLinux(cfg *config) (reporterImpl, []string, error) {
"panic: failed to create temp dir",
"fatal error: unexpected signal during runtime execution", // presubmably OOM turned into SIGBUS
"signal SIGBUS: bus error", // presubmably OOM turned into SIGBUS
- "Out of memory: Kill process .* \\(syz-fuzzer\\)",
+ "Out of memory: Kill process .* \\(syz-executor\\)",
"Out of memory: Kill process .* \\(sshd\\)",
- "Killed process .* \\(syz-fuzzer\\)",
+ "Killed process .* \\(syz-executor\\)",
"Killed process .* \\(sshd\\)",
- "lowmemorykiller: Killing 'syz-fuzzer'",
+ "lowmemorykiller: Killing 'syz-executor'",
"lowmemorykiller: Killing 'sshd'",
"INIT: PANIC: segmentation violation!",
"\\*\\*\\* stack smashing detected \\*\\*\\*: terminated",
diff --git a/pkg/report/testdata/fuchsia/report/6 b/pkg/report/testdata/fuchsia/report/6
index 44993f4a8..2813e369e 100644
--- a/pkg/report/testdata/fuchsia/report/6
+++ b/pkg/report/testdata/fuchsia/report/6
@@ -7,7 +7,7 @@ by pkg/report, so we put the panic message here.
ZIRCON KERNEL PANIC
-[00131.346] 01102.01116> <== fatal exception: process /tmp/syz-fuzzer[31717] thread pthread_t:0x1184f772cb38[61384]
+[00131.346] 01102.01116> <== fatal exception: process /tmp/syz-executor[31717] thread pthread_t:0x1184f772cb38[61384]
[00131.346] 01102.01116> <== fatal page fault, PC at 0xd8af19736ef
[00131.346] 01102.01116> CS: 0 RIP: 0xd8af19736ef EFL: 0x10246 CR2: 0x6fe5cd59a000
[00131.346] 01102.01116> RAX: 0x6fe5cd59a000 RBX: 0x6ef13ea16400 RCX: 0xd8af2070520 RDX: 0xd8af386f0a0
@@ -36,18 +36,18 @@ ZIRCON KERNEL PANIC
[00131.375] 01102.01116> dso: id=63914be467c5f24aad721d5d496d022559a0562d base=0x77ed616e3000 name=libc.so
[00131.375] 01102.01116> dso: id=48b429c1159afb653a51dd253346e51e9844197b base=0x755a0bf72000 name=<vDSO>
[00131.375] 01102.01116> dso: id=773627f59f0eab9eece83b31d05d685e001bd9f2 base=0x65ccdbd16000 name=libfdio.so
-[00131.375] 01102.01116> dso: id=1496e1863bc310a7322542c41969d8ca90d92878 base=0xd8af0615000 name=app:/tmp/syz-fuzzer
-[00131.375] 01102.01116> bt#01: pc 0xd8af19736ef sp 0x6ef13e3cf6b8 (app:/tmp/syz-fuzzer,0x135e6ef)
-[00131.375] 01102.01116> bt#02: pc 0xd8af1974ae5 sp 0x6ef13e3cf6c8 (app:/tmp/syz-fuzzer,0x135fae5)
-[00131.375] 01102.01116> bt#03: pc 0xd8af1a30ba2 sp 0x6ef13e3cf6d8 (app:/tmp/syz-fuzzer,0x141bba2)
-[00131.375] 01102.01116> bt#04: pc 0xd8af1a311c2 sp 0x6ef13e3cf6e8 (app:/tmp/syz-fuzzer,0x141c1c2)
-[00131.375] 01102.01116> bt#05: pc 0xd8af1a36209 sp 0x6ef13e3cf6f8 (app:/tmp/syz-fuzzer,0x1421209)
-[00131.375] 01102.01116> bt#06: pc 0xd8af1a2d8cc sp 0x6ef13e3cf708 (app:/tmp/syz-fuzzer,0x14188cc)
-[00131.375] 01102.01116> bt#07: pc 0xd8af1a2d9ac sp 0x6ef13e3cf718 (app:/tmp/syz-fuzzer,0x14189ac)
-[00131.375] 01102.01116> bt#08: pc 0xd8af1ccc289 sp 0x6ef13e3cf728 (app:/tmp/syz-fuzzer,0x16b7289)
-[00131.375] 01102.01116> bt#09: pc 0xd8af1cc9320 sp 0x6ef13e3cf738 (app:/tmp/syz-fuzzer,0x16b4320)
-[00131.375] 01102.01116> bt#10: pc 0xd8af1d16e5f sp 0x6ef13e3cf748 (app:/tmp/syz-fuzzer,0x1701e5f)
-[00131.375] 01102.01116> bt#11: pc 0xd8af1d1699c sp 0x6ef13e3cf758 (app:/tmp/syz-fuzzer,0x170199c)
-[00131.375] 01102.01116> bt#12: pc 0xd8af1d158bf sp 0x6ef13e3cf768 (app:/tmp/syz-fuzzer,0x17008bf)
-[00131.375] 01102.01116> bt#13: pc 0xd8af19ba1a1 sp 0x6ef13e3cf778 (app:/tmp/syz-fuzzer,0x13a51a1)
+[00131.375] 01102.01116> dso: id=1496e1863bc310a7322542c41969d8ca90d92878 base=0xd8af0615000 name=app:/tmp/syz-executor
+[00131.375] 01102.01116> bt#01: pc 0xd8af19736ef sp 0x6ef13e3cf6b8 (app:/tmp/syz-executor,0x135e6ef)
+[00131.375] 01102.01116> bt#02: pc 0xd8af1974ae5 sp 0x6ef13e3cf6c8 (app:/tmp/syz-executor,0x135fae5)
+[00131.375] 01102.01116> bt#03: pc 0xd8af1a30ba2 sp 0x6ef13e3cf6d8 (app:/tmp/syz-executor,0x141bba2)
+[00131.375] 01102.01116> bt#04: pc 0xd8af1a311c2 sp 0x6ef13e3cf6e8 (app:/tmp/syz-executor,0x141c1c2)
+[00131.375] 01102.01116> bt#05: pc 0xd8af1a36209 sp 0x6ef13e3cf6f8 (app:/tmp/syz-executor,0x1421209)
+[00131.375] 01102.01116> bt#06: pc 0xd8af1a2d8cc sp 0x6ef13e3cf708 (app:/tmp/syz-executor,0x14188cc)
+[00131.375] 01102.01116> bt#07: pc 0xd8af1a2d9ac sp 0x6ef13e3cf718 (app:/tmp/syz-executor,0x14189ac)
+[00131.375] 01102.01116> bt#08: pc 0xd8af1ccc289 sp 0x6ef13e3cf728 (app:/tmp/syz-executor,0x16b7289)
+[00131.375] 01102.01116> bt#09: pc 0xd8af1cc9320 sp 0x6ef13e3cf738 (app:/tmp/syz-executor,0x16b4320)
+[00131.375] 01102.01116> bt#10: pc 0xd8af1d16e5f sp 0x6ef13e3cf748 (app:/tmp/syz-executor,0x1701e5f)
+[00131.375] 01102.01116> bt#11: pc 0xd8af1d1699c sp 0x6ef13e3cf758 (app:/tmp/syz-executor,0x170199c)
+[00131.375] 01102.01116> bt#12: pc 0xd8af1d158bf sp 0x6ef13e3cf768 (app:/tmp/syz-executor,0x17008bf)
+[00131.375] 01102.01116> bt#13: pc 0xd8af19ba1a1 sp 0x6ef13e3cf778 (app:/tmp/syz-executor,0x13a51a1)
[00131.375] 01102.01116> bt#14: end
diff --git a/pkg/rpcserver/last_executing.go b/pkg/rpcserver/last_executing.go
new file mode 100644
index 000000000..341ae6534
--- /dev/null
+++ b/pkg/rpcserver/last_executing.go
@@ -0,0 +1,68 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "sort"
+ "time"
+)
+
+// LastExecuting keeps the given number of last executed programs
+// for each proc in a VM, and allows to query this set after a crash.
+type LastExecuting struct {
+ count int
+ procs []ExecRecord
+ positions []int
+}
+
+type ExecRecord struct {
+ ID int
+ Proc int
+ Prog []byte
+ Time time.Duration
+}
+
+func MakeLastExecuting(procs, count int) *LastExecuting {
+ return &LastExecuting{
+ count: count,
+ procs: make([]ExecRecord, procs*count),
+ positions: make([]int, procs),
+ }
+}
+
+// Note execution of the 'prog' on 'proc' at time 'now'.
+func (last *LastExecuting) Note(id, proc int, prog []byte, now time.Duration) {
+ pos := &last.positions[proc]
+ last.procs[proc*last.count+*pos] = ExecRecord{
+ ID: id,
+ Proc: proc,
+ Prog: prog,
+ Time: now,
+ }
+ *pos++
+ if *pos == last.count {
+ *pos = 0
+ }
+}
+
+// Returns a sorted set of last executing programs.
+// The records are sorted by time in ascending order.
+// ExecRecord.Time is the difference in start executing time between this
+// program and the program that started executing last.
+func (last *LastExecuting) Collect() []ExecRecord {
+ procs := last.procs
+ last.procs = nil // The type must not be used after this.
+ sort.Slice(procs, func(i, j int) bool {
+ return procs[i].Time < procs[j].Time
+ })
+ max := procs[len(procs)-1].Time
+ for i := len(procs) - 1; i >= 0; i-- {
+ if procs[i].Time == 0 {
+ procs = procs[i+1:]
+ break
+ }
+ procs[i].Time = max - procs[i].Time
+ }
+ return procs
+}
diff --git a/pkg/rpcserver/last_executing_test.go b/pkg/rpcserver/last_executing_test.go
new file mode 100644
index 000000000..c9f3cc2bf
--- /dev/null
+++ b/pkg/rpcserver/last_executing_test.go
@@ -0,0 +1,56 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLastExecutingEmpty(t *testing.T) {
+ last := MakeLastExecuting(10, 10)
+ assert.Empty(t, last.Collect())
+}
+
+func TestLastExecuting(t *testing.T) {
+ last := MakeLastExecuting(10, 3)
+ last.Note(1, 0, []byte("prog1"), 1)
+
+ last.Note(2, 1, []byte("prog2"), 2)
+ last.Note(3, 1, []byte("prog3"), 3)
+
+ last.Note(4, 3, []byte("prog4"), 4)
+ last.Note(5, 3, []byte("prog5"), 5)
+ last.Note(6, 3, []byte("prog6"), 6)
+
+ last.Note(7, 7, []byte("prog7"), 7)
+ last.Note(8, 7, []byte("prog8"), 8)
+ last.Note(9, 7, []byte("prog9"), 9)
+ last.Note(10, 7, []byte("prog10"), 10)
+ last.Note(11, 7, []byte("prog11"), 11)
+
+ last.Note(12, 9, []byte("prog12"), 12)
+
+ last.Note(13, 8, []byte("prog13"), 13)
+
+ assert.Equal(t, last.Collect(), []ExecRecord{
+ {ID: 1, Proc: 0, Prog: []byte("prog1"), Time: 12},
+
+ {ID: 2, Proc: 1, Prog: []byte("prog2"), Time: 11},
+ {ID: 3, Proc: 1, Prog: []byte("prog3"), Time: 10},
+
+ {ID: 4, Proc: 3, Prog: []byte("prog4"), Time: 9},
+ {ID: 5, Proc: 3, Prog: []byte("prog5"), Time: 8},
+ {ID: 6, Proc: 3, Prog: []byte("prog6"), Time: 7},
+
+ {ID: 9, Proc: 7, Prog: []byte("prog9"), Time: 4},
+ {ID: 10, Proc: 7, Prog: []byte("prog10"), Time: 3},
+ {ID: 11, Proc: 7, Prog: []byte("prog11"), Time: 2},
+
+ {ID: 12, Proc: 9, Prog: []byte("prog12"), Time: 1},
+
+ {ID: 13, Proc: 8, Prog: []byte("prog13"), Time: 0},
+ })
+}
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
new file mode 100644
index 000000000..da1de1fc0
--- /dev/null
+++ b/pkg/rpcserver/local.go
@@ -0,0 +1,138 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "time"
+
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/prog"
+)
+
+type LocalConfig struct {
+ Config
+ // syz-executor binary.
+ Executor string
+ // Temp dir where to run executor process, it's up to the caller to clean it up if necessary.
+ Dir string
+ // Handle ctrl+C and exit.
+ HandleInterrupts bool
+ // Run executor under gdb.
+ GDB bool
+ // RunLocal exits when the context is cancelled.
+ Context context.Context
+ MachineChecked func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
+}
+
+func RunLocal(cfg *LocalConfig) error {
+ cfg.RPC = ":0"
+ cfg.VMLess = true
+ cfg.PrintMachineCheck = log.V(1)
+ ctx := &local{
+ cfg: cfg,
+ setupDone: make(chan bool),
+ }
+ serv, err := newImpl(&cfg.Config, ctx)
+ if err != nil {
+ return err
+ }
+ defer serv.Close()
+ ctx.serv = serv
+ // setupDone synchronizes assignment to ctx.serv and read of ctx.serv in MachineChecked
+ // for the race detector b/c it does not understand the synchronization via TCP socket connect/accept.
+ close(ctx.setupDone)
+
+ bin := cfg.Executor
+ args := []string{"runner", "local", "localhost", fmt.Sprint(serv.Port)}
+ if cfg.GDB {
+ bin = "gdb"
+ args = append([]string{
+ "--return-child-result",
+ "--ex=handle SIGPIPE nostop",
+ "--args",
+ cfg.Executor,
+ }, args...)
+ }
+ cmd := exec.Command(bin, args...)
+ cmd.Dir = cfg.Dir
+ if cfg.Debug || cfg.GDB {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ if cfg.GDB {
+ cmd.Stdin = os.Stdin
+ }
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("failed to start executor: %w", err)
+ }
+ res := make(chan error, 1)
+ go func() { res <- cmd.Wait() }()
+ shutdown := make(chan struct{})
+ if cfg.HandleInterrupts {
+ osutil.HandleInterrupts(shutdown)
+ }
+ var cmdErr error
+ select {
+ case <-shutdown:
+ case <-cfg.Context.Done():
+ case err := <-res:
+ cmdErr = fmt.Errorf("executor process exited: %w", err)
+ }
+ if cmdErr == nil {
+ cmd.Process.Kill()
+ <-res
+ }
+ if !cfg.HandleInterrupts {
+ // If the executor has crashed early, reply to all remaining requests to unblock tests.
+ loop:
+ for {
+ req := serv.execSource.Next()
+ if req == nil {
+ select {
+ case <-cfg.Context.Done():
+ break loop
+ default:
+ time.Sleep(time.Millisecond)
+ continue loop
+ }
+ }
+ req.Done(&queue.Result{Status: queue.ExecFailure, Err: errors.New("executor crashed")})
+ }
+ }
+ return cmdErr
+}
+
+type local struct {
+ cfg *LocalConfig
+ serv *Server
+ setupDone chan bool
+}
+
+func (ctx *local) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ <-ctx.setupDone
+ ctx.serv.TriagedCorpus()
+ return ctx.cfg.MachineChecked(features, syscalls)
+}
+
+func (ctx *local) BugFrames() ([]string, []string) {
+ return nil, nil
+}
+
+func (ctx *local) MaxSignal() signal.Signal {
+ return nil
+}
+
+func (ctx *local) CoverageFilter(modules []*cover.KernelModule) []uint64 {
+ return nil
+}
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
new file mode 100644
index 000000000..1b090126b
--- /dev/null
+++ b/pkg/rpcserver/rpcserver.go
@@ -0,0 +1,796 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package rpcserver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "maps"
+ "math/rand"
+ "os"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/google/syzkaller/pkg/cover"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/mgrconfig"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/signal"
+ "github.com/google/syzkaller/pkg/stats"
+ "github.com/google/syzkaller/pkg/vminfo"
+ "github.com/google/syzkaller/prog"
+ "github.com/google/syzkaller/sys/targets"
+)
+
+type Config struct {
+ vminfo.Config
+ RPC string
+ VMLess bool
+ PrintMachineCheck bool
+ Procs int
+ Slowdown int
+}
+
+type Manager interface {
+ MaxSignal() signal.Signal
+ BugFrames() (leaks []string, races []string)
+ MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
+ CoverageFilter(modules []*cover.KernelModule) []uint64
+}
+
+type Server struct {
+ Port int
+ StatExecs *stats.Val
+ StatNumFuzzing *stats.Val
+
+ cfg *Config
+ mgr Manager
+ serv *flatrpc.Serv
+ target *prog.Target
+ timeouts targets.Timeouts
+ checker *vminfo.Checker
+
+ infoOnce sync.Once
+ checkDone atomic.Bool
+ checkFailures int
+ baseSource *queue.DynamicSourceCtl
+ enabledFeatures flatrpc.Feature
+ setupFeatures flatrpc.Feature
+ modules []*cover.KernelModule
+ canonicalModules *cover.Canonicalizer
+ coverFilter []uint64
+
+ mu sync.Mutex
+ runners map[string]*Runner
+ info map[string]VMState
+ execSource queue.Source
+ triagedCorpus atomic.Bool
+
+ statExecRetries *stats.Val
+ statExecutorRestarts *stats.Val
+ statExecBufferTooSmall *stats.Val
+ statVMRestarts *stats.Val
+ statNoExecRequests *stats.Val
+ statNoExecDuration *stats.Val
+}
+
+type Runner struct {
+ stopped bool
+ finished chan bool
+ injectExec chan<- bool
+ infoc chan chan []byte
+ conn *flatrpc.Conn
+ machineInfo []byte
+ canonicalizer *cover.CanonicalizerInstance
+ nextRequestID int64
+ requests map[int64]*queue.Request
+ executing map[int64]bool
+ lastExec *LastExecuting
+ rnd *rand.Rand
+}
+
+func New(cfg *mgrconfig.Config, mgr Manager, debug bool) (*Server, error) {
+ sandbox, err := flatrpc.SandboxToFlags(cfg.Sandbox)
+ if err != nil {
+ return nil, err
+ }
+ return newImpl(&Config{
+ Config: vminfo.Config{
+ Target: cfg.Target,
+ Features: flatrpc.AllFeatures,
+ Syscalls: cfg.Syscalls,
+ Debug: debug,
+ Cover: cfg.Cover,
+ Sandbox: sandbox,
+ SandboxArg: cfg.SandboxArg,
+ },
+ RPC: cfg.RPC,
+ VMLess: cfg.VMLess,
+ PrintMachineCheck: true,
+ Procs: cfg.Procs,
+ Slowdown: cfg.Timeouts.Slowdown,
+ }, mgr)
+}
+
+func newImpl(cfg *Config, mgr Manager) (*Server, error) {
+ cfg.Procs = min(cfg.Procs, prog.MaxPids)
+ checker := vminfo.New(&cfg.Config)
+ baseSource := queue.DynamicSource(checker)
+ serv := &Server{
+ cfg: cfg,
+ mgr: mgr,
+ target: cfg.Target,
+ timeouts: targets.Get(cfg.Target.OS, cfg.Target.Arch).Timeouts(cfg.Slowdown),
+ runners: make(map[string]*Runner),
+ info: make(map[string]VMState),
+ checker: checker,
+ baseSource: baseSource,
+ execSource: queue.Retry(baseSource),
+
+ StatExecs: stats.Create("exec total", "Total test program executions",
+ stats.Console, stats.Rate{}, stats.Prometheus("syz_exec_total")),
+ StatNumFuzzing: stats.Create("fuzzing VMs", "Number of VMs that are currently fuzzing",
+ stats.Console, stats.Link("/vms")),
+ statExecRetries: stats.Create("exec retries",
+ "Number of times a test program was restarted because the first run failed",
+ stats.Rate{}, stats.Graph("executor")),
+ statExecutorRestarts: stats.Create("executor restarts",
+ "Number of times executor process was restarted", stats.Rate{}, stats.Graph("executor")),
+ statExecBufferTooSmall: stats.Create("buffer too small",
+ "Program serialization overflowed exec buffer", stats.NoGraph),
+ statVMRestarts: stats.Create("vm restarts", "Total number of VM starts",
+ stats.Rate{}, stats.NoGraph),
+ statNoExecRequests: stats.Create("no exec requests",
+ "Number of times fuzzer was stalled with no exec requests", stats.Rate{}),
+ statNoExecDuration: stats.Create("no exec duration",
+ "Total duration fuzzer was stalled with no exec requests (ns/sec)", stats.Rate{}),
+ }
+ s, err := flatrpc.ListenAndServe(cfg.RPC, serv.handleConn)
+ if err != nil {
+ return nil, err
+ }
+ serv.serv = s
+ serv.Port = s.Addr.Port
+ return serv, nil
+}
+
+func (serv *Server) Close() error {
+ return serv.serv.Close()
+}
+
+type VMState struct {
+ State int
+ Timestamp time.Time
+}
+
+const (
+ StateOffline = iota
+ StateBooting
+ StateFuzzing
+ StateStopping
+)
+
+func (serv *Server) VMState() map[string]VMState {
+ serv.mu.Lock()
+ defer serv.mu.Unlock()
+ return maps.Clone(serv.info)
+}
+
+func (serv *Server) MachineInfo(name string) []byte {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ if runner != nil && (runner.conn == nil || runner.stopped) {
+ runner = nil
+ }
+ serv.mu.Unlock()
+ if runner == nil {
+ return []byte("VM is not alive")
+ }
+ return runner.machineInfo
+}
+
+func (serv *Server) RunnerStatus(name string) []byte {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ if runner != nil && (runner.conn == nil || runner.stopped) {
+ runner = nil
+ }
+ serv.mu.Unlock()
+ if runner == nil {
+ return []byte("VM is not alive")
+ }
+ resc := make(chan []byte, 1)
+ timeout := time.After(time.Minute)
+ select {
+ case runner.infoc <- resc:
+ case <-timeout:
+ return []byte("VM loop is not responding")
+ }
+ select {
+ case res := <-resc:
+ return res
+ case <-timeout:
+ return []byte("VM is not responding")
+ }
+}
+
+func (serv *Server) handleConn(conn *flatrpc.Conn) {
+ name, machineInfo, canonicalizer, err := serv.handshake(conn)
+ if err != nil {
+ log.Logf(1, "%v", err)
+ return
+ }
+
+ if serv.cfg.VMLess {
+ // There is no VM loop, so mimic what it would do.
+ serv.CreateInstance(name, nil)
+ defer func() {
+ serv.StopFuzzing(name)
+ serv.ShutdownInstance(name, true)
+ }()
+ }
+
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ if runner == nil || runner.stopped {
+ serv.mu.Unlock()
+ log.Logf(2, "VM %v shut down before connect", name)
+ return
+ }
+ serv.info[name] = VMState{StateFuzzing, time.Now()}
+ runner.conn = conn
+ runner.machineInfo = machineInfo
+ runner.canonicalizer = canonicalizer
+ serv.mu.Unlock()
+ defer close(runner.finished)
+
+ if serv.triagedCorpus.Load() {
+ if err := runner.sendStartLeakChecks(); err != nil {
+ log.Logf(2, "%v", err)
+ return
+ }
+ }
+
+ err = serv.connectionLoop(runner)
+ log.Logf(2, "runner %v: %v", name, err)
+}
+
+func (serv *Server) handshake(conn *flatrpc.Conn) (string, []byte, *cover.CanonicalizerInstance, error) {
+ connectReq, err := flatrpc.Recv[*flatrpc.ConnectRequestRaw](conn)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ log.Logf(1, "runner %v connected", connectReq.Name)
+ if !serv.cfg.VMLess {
+ checkRevisions(connectReq, serv.cfg.Target)
+ }
+ serv.statVMRestarts.Add(1)
+
+ leaks, races := serv.mgr.BugFrames()
+ connectReply := &flatrpc.ConnectReply{
+ Debug: serv.cfg.Debug,
+ Cover: serv.cfg.Cover,
+ Procs: int32(serv.cfg.Procs),
+ Slowdown: int32(serv.timeouts.Slowdown),
+ SyscallTimeoutMs: int32(serv.timeouts.Syscall / time.Millisecond),
+ ProgramTimeoutMs: int32(serv.timeouts.Program / time.Millisecond),
+ LeakFrames: leaks,
+ RaceFrames: races,
+ }
+ connectReply.Files = serv.checker.RequiredFiles()
+ if serv.checkDone.Load() {
+ connectReply.Features = serv.setupFeatures
+ } else {
+ connectReply.Files = append(connectReply.Files, serv.checker.CheckFiles()...)
+ connectReply.Globs = serv.target.RequiredGlobs()
+ connectReply.Features = serv.cfg.Features
+ }
+ if err := flatrpc.Send(conn, connectReply); err != nil {
+ return "", nil, nil, err
+ }
+
+ infoReq, err := flatrpc.Recv[*flatrpc.InfoRequestRaw](conn)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ modules, machineInfo, err := serv.checker.MachineInfo(infoReq.Files)
+ if err != nil {
+ log.Logf(0, "parsing of machine info failed: %v", err)
+ if infoReq.Error == "" {
+ infoReq.Error = err.Error()
+ }
+ }
+ if infoReq.Error != "" {
+ log.Logf(0, "machine check failed: %v", infoReq.Error)
+ serv.checkFailures++
+ if serv.checkFailures == 10 {
+ log.Fatalf("machine check failing")
+ }
+ return "", nil, nil, errors.New("machine check failed")
+ }
+
+ serv.infoOnce.Do(func() {
+ serv.modules = modules
+ serv.canonicalModules = cover.NewCanonicalizer(modules, serv.cfg.Cover)
+ serv.coverFilter = serv.mgr.CoverageFilter(modules)
+ globs := make(map[string][]string)
+ for _, glob := range infoReq.Globs {
+ globs[glob.Name] = glob.Files
+ }
+ serv.target.UpdateGlobs(globs)
+ // Flatbuffers don't do deep copy of byte slices,
+ // so clone manually since we pass it to a goroutine.
+ for _, file := range infoReq.Files {
+ file.Data = slices.Clone(file.Data)
+ }
+ // Now execute check programs.
+ go func() {
+ if err := serv.runCheck(infoReq.Files, infoReq.Features); err != nil {
+ log.Fatalf("check failed: %v", err)
+ }
+ }()
+ })
+
+ canonicalizer := serv.canonicalModules.NewInstance(modules)
+ infoReply := &flatrpc.InfoReply{
+ CoverFilter: canonicalizer.Decanonicalize(serv.coverFilter),
+ }
+ if err := flatrpc.Send(conn, infoReply); err != nil {
+ return "", nil, nil, err
+ }
+ return connectReq.Name, machineInfo, canonicalizer, nil
+}
+
+func (serv *Server) connectionLoop(runner *Runner) error {
+ if serv.cfg.Cover {
+ maxSignal := serv.mgr.MaxSignal().ToRaw()
+ for len(maxSignal) != 0 {
+ // Split coverage into batches to not grow the connection serialization
+ // buffer too much (we don't want to grow it larger than what will be needed
+ // to send programs).
+ n := min(len(maxSignal), 50000)
+ if err := runner.sendSignalUpdate(maxSignal[:n], nil); err != nil {
+ return err
+ }
+ maxSignal = maxSignal[n:]
+ }
+ }
+
+ serv.StatNumFuzzing.Add(1)
+ defer serv.StatNumFuzzing.Add(-1)
+ var infoc chan []byte
+ defer func() {
+ if infoc != nil {
+ infoc <- []byte("VM has crashed")
+ }
+ }()
+ for {
+ if infoc == nil {
+ select {
+ case infoc = <-runner.infoc:
+ msg := &flatrpc.HostMessage{
+ Msg: &flatrpc.HostMessages{
+ Type: flatrpc.HostMessagesRawStateRequest,
+ Value: &flatrpc.StateRequest{},
+ },
+ }
+ if err := flatrpc.Send(runner.conn, msg); err != nil {
+ return err
+ }
+ default:
+ }
+ }
+ for len(runner.requests)-len(runner.executing) < 2*serv.cfg.Procs {
+ req := serv.execSource.Next()
+ if req == nil {
+ break
+ }
+ if err := serv.sendRequest(runner, req); err != nil {
+ return err
+ }
+ }
+ if len(runner.requests) == 0 {
+ // The runner has no requests at all, so don't wait to receive anything from it.
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ raw, err := flatrpc.Recv[*flatrpc.ExecutorMessageRaw](runner.conn)
+ if err != nil {
+ return err
+ }
+ if raw.Msg == nil || raw.Msg.Value == nil {
+ return errors.New("received no message")
+ }
+ switch msg := raw.Msg.Value.(type) {
+ case *flatrpc.ExecutingMessage:
+ err = serv.handleExecutingMessage(runner, msg)
+ case *flatrpc.ExecResult:
+ err = serv.handleExecResult(runner, msg)
+ case *flatrpc.StateResult:
+ if infoc != nil {
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "pending requests on the VM:")
+ for id := range runner.requests {
+ fmt.Fprintf(buf, " %v", id)
+ }
+ fmt.Fprintf(buf, "\n\n")
+ infoc <- append(buf.Bytes(), msg.Data...)
+ infoc = nil
+ }
+ default:
+ return fmt.Errorf("received unknown message type %T", msg)
+ }
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func (serv *Server) sendRequest(runner *Runner, req *queue.Request) error {
+ if err := req.Validate(); err != nil {
+ panic(err)
+ }
+ runner.nextRequestID++
+ id := runner.nextRequestID
+ var flags flatrpc.RequestFlag
+ if req.ReturnOutput {
+ flags |= flatrpc.RequestFlagReturnOutput
+ }
+ if req.ReturnError {
+ flags |= flatrpc.RequestFlagReturnError
+ }
+ allSignal := make([]int32, len(req.ReturnAllSignal))
+ for i, call := range req.ReturnAllSignal {
+ allSignal[i] = int32(call)
+ }
+ // Do not let too much state accumulate.
+ const restartIn = 600
+ resetFlags := flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagCollectComps
+ opts := req.ExecOpts
+ if req.ExecOpts.ExecFlags&resetFlags != 0 && runner.rnd.Intn(restartIn) == 0 {
+ opts.EnvFlags |= flatrpc.ExecEnvResetState
+ }
+ if serv.cfg.Debug {
+ opts.EnvFlags |= flatrpc.ExecEnvDebug
+ }
+ var data []byte
+ if req.BinaryFile == "" {
+ progData, err := req.Prog.SerializeForExec()
+ if err != nil {
+ // It's bad if we systematically fail to serialize programs,
+ // but so far we don't have a better handling than counting this.
+ // This error is observed a lot on the seeded syz_mount_image calls.
+ serv.statExecBufferTooSmall.Add(1)
+ req.Done(&queue.Result{Status: queue.ExecFailure})
+ return nil
+ }
+ data = progData
+ } else {
+ flags |= flatrpc.RequestFlagIsBinary
+ fileData, err := os.ReadFile(req.BinaryFile)
+ if err != nil {
+ req.Done(&queue.Result{
+ Status: queue.ExecFailure,
+ Err: err,
+ })
+ return nil
+ }
+ data = fileData
+ }
+ msg := &flatrpc.HostMessage{
+ Msg: &flatrpc.HostMessages{
+ Type: flatrpc.HostMessagesRawExecRequest,
+ Value: &flatrpc.ExecRequest{
+ Id: id,
+ ProgData: data,
+ Flags: flags,
+ ExecOpts: &opts,
+ AllSignal: allSignal,
+ },
+ },
+ }
+ runner.requests[id] = req
+ return flatrpc.Send(runner.conn, msg)
+}
+
+func (serv *Server) handleExecutingMessage(runner *Runner, msg *flatrpc.ExecutingMessage) error {
+ req := runner.requests[msg.Id]
+ if req == nil {
+ return fmt.Errorf("can't find executing request %v", msg.Id)
+ }
+ proc := int(msg.ProcId)
+ if proc < 0 || proc >= serv.cfg.Procs {
+ return fmt.Errorf("got bad proc id %v", proc)
+ }
+ serv.StatExecs.Add(1)
+ if msg.Try == 0 {
+ if msg.WaitDuration != 0 {
+ serv.statNoExecRequests.Add(1)
+ // Cap wait duration to 1 second to avoid extreme peaks on the graph
+ // which make it impossible to see real data (the rest becomes a flat line).
+ serv.statNoExecDuration.Add(int(min(msg.WaitDuration, 1e9)))
+ }
+ } else {
+ serv.statExecRetries.Add(1)
+ }
+ runner.lastExec.Note(int(msg.Id), proc, req.Prog.Serialize(), osutil.MonotonicNano())
+ select {
+ case runner.injectExec <- true:
+ default:
+ }
+ runner.executing[msg.Id] = true
+ return nil
+}
+
+func (serv *Server) handleExecResult(runner *Runner, msg *flatrpc.ExecResult) error {
+ req := runner.requests[msg.Id]
+ if req == nil {
+ return fmt.Errorf("can't find executed request %v", msg.Id)
+ }
+ delete(runner.requests, msg.Id)
+ delete(runner.executing, msg.Id)
+ if msg.Info != nil {
+ for len(msg.Info.Calls) < len(req.Prog.Calls) {
+ msg.Info.Calls = append(msg.Info.Calls, &flatrpc.CallInfo{
+ Error: 999,
+ })
+ }
+ msg.Info.Calls = msg.Info.Calls[:len(req.Prog.Calls)]
+ if msg.Info.Freshness == 0 {
+ serv.statExecutorRestarts.Add(1)
+ }
+ if !serv.cfg.Cover && req.ExecOpts.ExecFlags&flatrpc.ExecFlagCollectSignal != 0 {
+ // Coverage collection is disabled, but signal was requested => use a substitute signal.
+ addFallbackSignal(req.Prog, msg.Info)
+ }
+ for i := 0; i < len(msg.Info.Calls); i++ {
+ call := msg.Info.Calls[i]
+ call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
+ call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+ }
+ if len(msg.Info.ExtraRaw) != 0 {
+ msg.Info.Extra = msg.Info.ExtraRaw[0]
+ for _, info := range msg.Info.ExtraRaw[1:] {
+ // All processing in the fuzzer later will convert signal/cover to maps and dedup,
+ // so there is little point in deduping here.
+ msg.Info.Extra.Cover = append(msg.Info.Extra.Cover, info.Cover...)
+ msg.Info.Extra.Signal = append(msg.Info.Extra.Signal, info.Signal...)
+ }
+ msg.Info.Extra.Cover = runner.canonicalizer.Canonicalize(msg.Info.Extra.Cover)
+ msg.Info.Extra.Signal = runner.canonicalizer.Canonicalize(msg.Info.Extra.Signal)
+ msg.Info.ExtraRaw = nil
+ }
+ }
+ status := queue.Success
+ var resErr error
+ if msg.Error != "" {
+ status = queue.ExecFailure
+ resErr = errors.New(msg.Error)
+ }
+ req.Done(&queue.Result{
+ Status: status,
+ Info: msg.Info,
+ Output: slices.Clone(msg.Output),
+ Err: resErr,
+ })
+ return nil
+}
+
+func checkRevisions(a *flatrpc.ConnectRequest, target *prog.Target) {
+ if target.Arch != a.Arch {
+ log.Fatalf("mismatching manager/executor arches: %v vs %v", target.Arch, a.Arch)
+ }
+ if prog.GitRevision != a.GitRevision {
+ log.Fatalf("mismatching manager/executor git revisions: %v vs %v",
+ prog.GitRevision, a.GitRevision)
+ }
+ if target.Revision != a.SyzRevision {
+ log.Fatalf("mismatching manager/executor system call descriptions: %v vs %v",
+ target.Revision, a.SyzRevision)
+ }
+}
+
+func (serv *Server) runCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeatureInfo []*flatrpc.FeatureInfo) error {
+ enabledCalls, disabledCalls, features, checkErr := serv.checker.Run(checkFilesInfo, checkFeatureInfo)
+ enabledCalls, transitivelyDisabled := serv.target.TransitivelyEnabledCalls(enabledCalls)
+ // Note: need to print disabled syscalls before failing due to an error.
+ // This helps to debug "all system calls are disabled".
+ if serv.cfg.PrintMachineCheck {
+ serv.printMachineCheck(checkFilesInfo, enabledCalls, disabledCalls, transitivelyDisabled, features)
+ }
+ if checkErr != nil {
+ return checkErr
+ }
+ serv.enabledFeatures = features.Enabled()
+ serv.setupFeatures = features.NeedSetup()
+ newSource := serv.mgr.MachineChecked(serv.enabledFeatures, enabledCalls)
+ serv.baseSource.Store(newSource)
+ serv.checkDone.Store(true)
+ return nil
+}
+
+func (serv *Server) printMachineCheck(checkFilesInfo []*flatrpc.FileInfo, enabledCalls map[*prog.Syscall]bool,
+ disabledCalls, transitivelyDisabled map[*prog.Syscall]string, features vminfo.Features) {
+ buf := new(bytes.Buffer)
+ if len(serv.cfg.Syscalls) != 0 || log.V(1) {
+ if len(disabledCalls) != 0 {
+ var lines []string
+ for call, reason := range disabledCalls {
+ lines = append(lines, fmt.Sprintf("%-44v: %v\n", call.Name, reason))
+ }
+ sort.Strings(lines)
+ fmt.Fprintf(buf, "disabled the following syscalls:\n%s\n", strings.Join(lines, ""))
+ }
+ if len(transitivelyDisabled) != 0 {
+ var lines []string
+ for call, reason := range transitivelyDisabled {
+ lines = append(lines, fmt.Sprintf("%-44v: %v\n", call.Name, reason))
+ }
+ sort.Strings(lines)
+ fmt.Fprintf(buf, "transitively disabled the following syscalls"+
+ " (missing resource [creating syscalls]):\n%s\n",
+ strings.Join(lines, ""))
+ }
+ }
+ hasFileErrors := false
+ for _, file := range checkFilesInfo {
+ if file.Error == "" {
+ continue
+ }
+ if !hasFileErrors {
+ fmt.Fprintf(buf, "failed to read the following files in the VM:\n")
+ }
+ fmt.Fprintf(buf, "%-44v: %v\n", file.Name, file.Error)
+ hasFileErrors = true
+ }
+ if hasFileErrors {
+ fmt.Fprintf(buf, "\n")
+ }
+ var lines []string
+ lines = append(lines, fmt.Sprintf("%-24v: %v/%v\n", "syscalls",
+ len(enabledCalls), len(serv.cfg.Target.Syscalls)))
+ for feat, info := range features {
+ lines = append(lines, fmt.Sprintf("%-24v: %v\n",
+ flatrpc.EnumNamesFeature[feat], info.Reason))
+ }
+ sort.Strings(lines)
+ buf.WriteString(strings.Join(lines, ""))
+ fmt.Fprintf(buf, "\n")
+ log.Logf(0, "machine check:\n%s", buf.Bytes())
+}
+
+func (serv *Server) CreateInstance(name string, injectExec chan<- bool) {
+ runner := &Runner{
+ injectExec: injectExec,
+ infoc: make(chan chan []byte),
+ finished: make(chan bool),
+ requests: make(map[int64]*queue.Request),
+ executing: make(map[int64]bool),
+ lastExec: MakeLastExecuting(serv.cfg.Procs, 6),
+ rnd: rand.New(rand.NewSource(time.Now().UnixNano())),
+ }
+ serv.mu.Lock()
+ if serv.runners[name] != nil {
+ panic(fmt.Sprintf("duplicate instance %s", name))
+ }
+ serv.runners[name] = runner
+ serv.info[name] = VMState{StateBooting, time.Now()}
+ serv.mu.Unlock()
+}
+
+// StopFuzzing prevents further request exchange with the instance.
+// To make the Server fully forget an instance, ShutdownInstance must be called.
+func (serv *Server) StopFuzzing(name string) {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ runner.stopped = true
+ conn := runner.conn
+ serv.info[name] = VMState{StateStopping, time.Now()}
+ serv.mu.Unlock()
+ if conn != nil {
+ conn.Close()
+ }
+}
+
+func (serv *Server) ShutdownInstance(name string, crashed bool) ([]ExecRecord, []byte) {
+ serv.mu.Lock()
+ runner := serv.runners[name]
+ delete(serv.runners, name)
+ serv.info[name] = VMState{StateOffline, time.Now()}
+ serv.mu.Unlock()
+ if runner.conn != nil {
+ // Wait for the connection goroutine to finish and stop touching data.
+ // If conn is nil before we removed the runner, then it won't touch anything.
+ <-runner.finished
+ }
+ for id, req := range runner.requests {
+ status := queue.Restarted
+ if crashed && runner.executing[id] {
+ status = queue.Crashed
+ }
+ req.Done(&queue.Result{Status: status})
+ }
+ return runner.lastExec.Collect(), runner.machineInfo
+}
+
+func (serv *Server) DistributeSignalDelta(plus, minus signal.Signal) {
+ plusRaw := plus.ToRaw()
+ minusRaw := minus.ToRaw()
+ serv.foreachRunnerAsync(func(runner *Runner) {
+ runner.sendSignalUpdate(plusRaw, minusRaw)
+ })
+}
+
+func (runner *Runner) sendSignalUpdate(plus, minus []uint64) error {
+ msg := &flatrpc.HostMessage{
+ Msg: &flatrpc.HostMessages{
+ Type: flatrpc.HostMessagesRawSignalUpdate,
+ Value: &flatrpc.SignalUpdate{
+ NewMax: runner.canonicalizer.Decanonicalize(plus),
+ DropMax: runner.canonicalizer.Decanonicalize(minus),
+ },
+ },
+ }
+ return flatrpc.Send(runner.conn, msg)
+}
+
+func (serv *Server) TriagedCorpus() {
+ serv.triagedCorpus.Store(true)
+ serv.foreachRunnerAsync(func(runner *Runner) {
+ runner.sendStartLeakChecks()
+ })
+}
+
+func (runner *Runner) sendStartLeakChecks() error {
+ msg := &flatrpc.HostMessage{
+ Msg: &flatrpc.HostMessages{
+ Type: flatrpc.HostMessagesRawStartLeakChecks,
+ Value: &flatrpc.StartLeakChecks{},
+ },
+ }
+ return flatrpc.Send(runner.conn, msg)
+}
+
+// foreachRunnerAsync runs callback fn for each connected runner asynchronously.
+// If a VM has hanged w/o reading out the socket, we want to avoid blocking
+// important goroutines on the send operations.
+func (serv *Server) foreachRunnerAsync(fn func(runner *Runner)) {
+ serv.mu.Lock()
+ defer serv.mu.Unlock()
+ for _, runner := range serv.runners {
+ if runner.conn != nil {
+ go fn(runner)
+ }
+ }
+}
+
+// addFallbackSignal computes simple fallback signal in cases we don't have real coverage signal.
+// We use syscall number or-ed with returned errno value as signal.
+// At least this gives us all combinations of syscall+errno.
+func addFallbackSignal(p *prog.Prog, info *flatrpc.ProgInfo) {
+ callInfos := make([]prog.CallInfo, len(info.Calls))
+ for i, inf := range info.Calls {
+ if inf.Flags&flatrpc.CallFlagExecuted != 0 {
+ callInfos[i].Flags |= prog.CallExecuted
+ }
+ if inf.Flags&flatrpc.CallFlagFinished != 0 {
+ callInfos[i].Flags |= prog.CallFinished
+ }
+ if inf.Flags&flatrpc.CallFlagBlocked != 0 {
+ callInfos[i].Flags |= prog.CallBlocked
+ }
+ callInfos[i].Errno = int(inf.Error)
+ }
+ p.FallbackSignal(callInfos)
+ for i, inf := range callInfos {
+ info.Calls[i].Signal = inf.Signal
+ }
+}
diff --git a/pkg/runtest/executor_test.go b/pkg/runtest/executor_test.go
new file mode 100644
index 000000000..d6f9a8434
--- /dev/null
+++ b/pkg/runtest/executor_test.go
@@ -0,0 +1,131 @@
+// Copyright 2015 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package runtest
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/pkg/csource"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/image"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/prog"
+ _ "github.com/google/syzkaller/sys"
+ "github.com/google/syzkaller/sys/targets"
+)
+
+// TestExecutor runs all internal executor unit tests.
+// We do it here because we already build executor binary here.
+func TestExecutor(t *testing.T) {
+ t.Parallel()
+ for _, sysTarget := range targets.List[runtime.GOOS] {
+ sysTarget := targets.Get(runtime.GOOS, sysTarget.Arch)
+ t.Run(sysTarget.Arch, func(t *testing.T) {
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ t.Parallel()
+ target, err := prog.GetTarget(runtime.GOOS, sysTarget.Arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bin := csource.BuildExecutor(t, target, "../..")
+ // qemu-user may allow us to run some cross-arch binaries.
+ if _, err := osutil.RunCmd(time.Minute, "", bin, "test"); err != nil {
+ if sysTarget.Arch == runtime.GOARCH || sysTarget.VMArch == runtime.GOARCH {
+ t.Fatal(err)
+ }
+ t.Skipf("skipping, cross-arch binary failed: %v", err)
+ }
+ })
+ }
+}
+
+func TestZlib(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget(targets.TestOS, targets.TestArch64)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ r := rand.New(testutil.RandSource(t))
+ for i := 0; i < 10; i++ {
+ data := testutil.RandMountImage(r)
+ compressed := image.Compress(data)
+ text := fmt.Sprintf(`syz_compare_zlib(&(0x7f0000000000)="$%s", AUTO, &(0x7f0000800000)="$%s", AUTO)`,
+ image.EncodeB64(data), image.EncodeB64(compressed))
+ p, err := target.Deserialize([]byte(text), prog.Strict)
+ if err != nil {
+ t.Fatalf("failed to deserialize empty program: %v", err)
+ }
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if res.Info.Calls[0].Error != 0 {
+ t.Fatalf("data comparison failed: %v\n%s", res.Info.Calls[0].Error, res.Output)
+ }
+ }
+}
+
+func TestExecutorCommonExt(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget("test", "64_fork")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..", "-DSYZ_TEST_COMMON_EXT_EXAMPLE=1")
+ // The example setup_ext_test does:
+ // *(uint64*)(SYZ_DATA_OFFSET + 0x1234) = 0xbadc0ffee;
+ // The following program tests that that value is present at 0x1234.
+ test := `syz_compare(&(0x7f0000001234)="", 0x8, &(0x7f0000000000)=@blob="eeffc0ad0b000000", AUTO)`
+ p, err := target.Deserialize([]byte(test), prog.Strict)
+ if err != nil {
+ t.Fatal(err)
+ }
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if call := res.Info.Calls[0]; call.Flags&flatrpc.CallFlagFinished == 0 || call.Error != 0 {
+ t.Fatalf("bad call result: flags=%x errno=%v", call.Flags, call.Error)
+ }
+}
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index eb57582a3..cef85f6e9 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -22,25 +22,25 @@ import (
"sort"
"strconv"
"strings"
- "sync"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
- "golang.org/x/sync/errgroup"
)
type runRequest struct {
*queue.Request
+ sourceOpts *csource.Options
+ executor queue.Executor
ok int
failed int
err error
result *queue.Result
results *flatrpc.ProgInfo // the expected results
+ repeat int // only relevant for C tests
name string
broken string
@@ -58,7 +58,9 @@ type Context struct {
Debug bool
Tests string // prefix to match test file names
- executor queue.PlainQueue
+ executor *queue.DynamicOrderer
+ requests []*runRequest
+ buildSem chan bool
}
func (ctx *Context) log(msg string, args ...interface{}) {
@@ -66,87 +68,11 @@ func (ctx *Context) log(msg string, args ...interface{}) {
}
func (ctx *Context) Run() error {
- if ctx.Retries%2 == 0 {
- ctx.Retries++
- }
- progs := make(chan *runRequest, 1000)
- var eg errgroup.Group
- eg.Go(func() error {
- defer close(progs)
- return ctx.generatePrograms(progs)
- })
- done := make(chan *runRequest)
- eg.Go(func() error {
- return ctx.processResults(done)
- })
-
- var wg sync.WaitGroup
- for req := range progs {
- req := req
- if req.broken != "" || req.skip != "" {
- done <- req
- continue
- }
- var retry queue.DoneCallback
- retry = func(_ *queue.Request, res *queue.Result) bool {
- // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
- // We don't want to fix this by significantly bumping all timeouts,
- // because if a program fails all the time with the default timeouts,
- // it will also fail during fuzzing. And we want to ensure that it's not the case.
- // So what we want is to tolerate episodic failures with the default timeouts.
- // To achieve this we run each test several times and ensure that it passes
- // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
- // In the best case this allows to get off with just 1 test run.
-
- if res.Err != nil {
- req.err = res.Err
- return true
- }
- req.result = res
- err := checkResult(req)
- if err == nil {
- req.ok++
- } else {
- req.failed++
- req.err = err
- }
- if req.ok > req.failed {
- // There are more successful than failed runs.
- req.err = nil
- return true
- }
- // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
- // which simplifies to `failed * 2 + 1`.
- if req.failed*2+1 <= ctx.Retries {
- // We can still retry the execution.
- req.OnDone(retry)
- ctx.executor.Submit(req.Request)
- return false
- }
- // Give up and fail on this request.
- return true
- }
- req.Request.OnDone(retry)
- ctx.executor.Submit(req.Request)
- wg.Add(1)
- go func() {
- defer wg.Done()
- req.Request.Wait(context.Background())
- done <- req
- }()
- }
- wg.Wait()
- close(done)
- return eg.Wait()
-}
-
-func (ctx *Context) Next() *queue.Request {
- return ctx.executor.Next()
-}
-
-func (ctx *Context) processResults(requests chan *runRequest) error {
+ ctx.buildSem = make(chan bool, runtime.GOMAXPROCS(0))
+ ctx.executor = queue.DynamicOrder()
+ ctx.generatePrograms()
var ok, fail, broken, skip int
- for req := range requests {
+ for _, req := range ctx.requests {
result := ""
verbose := false
if req.broken != "" {
@@ -158,14 +84,14 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
result = fmt.Sprintf("SKIP (%v)", req.skip)
verbose = true
} else {
+ req.Request.Wait(context.Background())
if req.err != nil {
fail++
result = fmt.Sprintf("FAIL: %v",
strings.Replace(req.err.Error(), "\n", "\n\t", -1))
- res := req.result
- if len(res.Output) != 0 {
+ if req.result != nil && len(req.result.Output) != 0 {
result += fmt.Sprintf("\n\t%s",
- strings.Replace(string(res.Output), "\n", "\n\t", -1))
+ strings.Replace(string(req.result.Output), "\n", "\n\t", -1))
}
} else {
ok++
@@ -186,7 +112,52 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
return nil
}
-func (ctx *Context) generatePrograms(progs chan *runRequest) error {
+func (ctx *Context) Next() *queue.Request {
+ return ctx.executor.Next()
+}
+
+func (ctx *Context) onDone(req *runRequest, res *queue.Result) bool {
+ // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
+ // We don't want to fix this by significantly bumping all timeouts,
+ // because if a program fails all the time with the default timeouts,
+ // it will also fail during fuzzing. And we want to ensure that it's not the case.
+ // So what we want is to tolerate episodic failures with the default timeouts.
+ // To achieve this we run each test several times and ensure that it passes
+ // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
+ // In the best case this allows to get off with just 1 test run.
+ if res.Err != nil {
+ req.err = res.Err
+ return true
+ }
+ req.result = res
+ err := checkResult(req)
+ if err == nil {
+ req.ok++
+ } else {
+ req.failed++
+ req.err = err
+ }
+ if req.ok > req.failed {
+ // There are more successful than failed runs.
+ req.err = nil
+ return true
+ }
+ // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
+ // which simplifies to `failed * 2 + 1`.
+ retries := ctx.Retries
+ if retries%2 == 0 {
+ retries++
+ }
+ if req.failed*2+1 <= retries {
+ // We can still retry the execution.
+ ctx.submit(req)
+ return false
+ }
+ // Give up and fail on this request.
+ return true
+}
+
+func (ctx *Context) generatePrograms() error {
cover := []bool{false}
if ctx.Features&flatrpc.FeatureCoverage != 0 {
cover = append(cover, true)
@@ -201,7 +172,7 @@ func (ctx *Context) generatePrograms(progs chan *runRequest) error {
return err
}
for _, file := range files {
- if err := ctx.generateFile(progs, sandboxes, cover, file); err != nil {
+ if err := ctx.generateFile(sandboxes, cover, file); err != nil {
return err
}
}
@@ -225,7 +196,7 @@ func progFileList(dir, filter string) ([]string, error) {
return res, nil
}
-func (ctx *Context) generateFile(progs chan *runRequest, sandboxes []string, cover []bool, filename string) error {
+func (ctx *Context) generateFile(sandboxes []string, cover []bool, filename string) error {
p, requires, results, err := parseProg(ctx.Target, ctx.Dir, filename)
if err != nil {
return err
@@ -239,10 +210,10 @@ nextSandbox:
name := fmt.Sprintf("%v %v", filename, sandbox)
for _, call := range p.Calls {
if !ctx.EnabledCalls[sandbox][call.Meta] {
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
skip: fmt.Sprintf("unsupported call %v", call.Meta.Name),
- }
+ })
continue nextSandbox
}
}
@@ -267,6 +238,9 @@ nextSandbox:
if sandbox == "" {
break // executor does not support empty sandbox
}
+ if times != 1 {
+ break
+ }
name := name
if cov {
name += "/cover"
@@ -274,11 +248,11 @@ nextSandbox:
properties["cover"] = cov
properties["C"] = false
properties["executor"] = true
- req, err := ctx.createSyzTest(p, sandbox, threaded, cov, times)
+ req, err := ctx.createSyzTest(p, sandbox, threaded, cov)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
if sysTarget.HostFuzzer {
// For HostFuzzer mode, we need to cross-compile
@@ -291,17 +265,17 @@ nextSandbox:
name += " C"
if !sysTarget.ExecutorUsesForkServer && times > 1 {
// Non-fork loop implementation does not support repetition.
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
broken: "non-forking loop",
- }
+ })
continue
}
req, err := ctx.createCTest(p, sandbox, threaded, times)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
}
}
@@ -405,14 +379,52 @@ func checkArch(requires map[string]bool, arch string) bool {
return true
}
-func (ctx *Context) produceTest(progs chan *runRequest, req *runRequest, name string,
- properties, requires map[string]bool, results *flatrpc.ProgInfo) {
+func (ctx *Context) produceTest(req *runRequest, name string, properties,
+ requires map[string]bool, results *flatrpc.ProgInfo) {
req.name = name
req.results = results
if !match(properties, requires) {
req.skip = "excluded by constraints"
}
- progs <- req
+ ctx.createTest(req)
+}
+
+func (ctx *Context) createTest(req *runRequest) {
+ req.executor = ctx.executor.Append()
+ ctx.requests = append(ctx.requests, req)
+ if req.skip != "" || req.broken != "" {
+ return
+ }
+ if req.sourceOpts == nil {
+ ctx.submit(req)
+ return
+ }
+ go func() {
+ ctx.buildSem <- true
+ defer func() {
+ <-ctx.buildSem
+ }()
+ src, err := csource.Write(req.Prog, *req.sourceOpts)
+ if err != nil {
+ req.err = fmt.Errorf("failed to create C source: %w", err)
+ req.Request.Done(&queue.Result{})
+ }
+ bin, err := csource.Build(ctx.Target, src)
+ if err != nil {
+ req.err = fmt.Errorf("failed to build C program: %w", err)
+ req.Request.Done(&queue.Result{})
+ return
+ }
+ req.BinaryFile = bin
+ ctx.submit(req)
+ }()
+}
+
+func (ctx *Context) submit(req *runRequest) {
+ req.OnDone(func(_ *queue.Request, res *queue.Result) bool {
+ return ctx.onDone(req, res)
+ })
+ req.executor.Submit(req.Request)
}
func match(props, requires map[string]bool) bool {
@@ -436,9 +448,9 @@ func match(props, requires map[string]bool) bool {
return true
}
-func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool, times int) (*runRequest, error) {
+func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool) (*runRequest, error) {
var opts flatrpc.ExecOpts
- sandboxFlags, err := ipc.SandboxToFlags(sandbox)
+ sandboxFlags, err := flatrpc.SandboxToFlags(sandbox)
if err != nil {
return nil, err
}
@@ -451,7 +463,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
opts.ExecFlags |= flatrpc.ExecFlagCollectCover
}
- opts.EnvFlags |= ipc.FeaturesToFlags(ctx.Features, nil)
+ opts.EnvFlags |= csource.FeaturesToFlags(ctx.Features, nil)
if ctx.Debug {
opts.EnvFlags |= flatrpc.ExecEnvDebug
}
@@ -459,7 +471,6 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
Request: &queue.Request{
Prog: p,
ExecOpts: opts,
- Repeat: times,
},
}
return req, nil
@@ -496,27 +507,19 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
opts.IEEE802154 = true
}
}
- src, err := csource.Write(p, opts)
- if err != nil {
- return nil, fmt.Errorf("failed to create C source: %w", err)
- }
- bin, err := csource.Build(p.Target, src)
- if err != nil {
- return nil, fmt.Errorf("failed to build C program: %w", err)
- }
var ipcFlags flatrpc.ExecFlag
if threaded {
ipcFlags |= flatrpc.ExecFlagThreaded
}
req := &runRequest{
+ sourceOpts: &opts,
Request: &queue.Request{
- Prog: p,
- BinaryFile: bin,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
ExecFlags: ipcFlags,
},
- Repeat: times,
},
+ repeat: times,
}
return req, nil
}
@@ -525,27 +528,17 @@ func checkResult(req *runRequest) error {
if req.result.Status != queue.Success {
return fmt.Errorf("non-successful result status (%v)", req.result.Status)
}
- var infos []*flatrpc.ProgInfo
+ infos := []*flatrpc.ProgInfo{req.result.Info}
isC := req.BinaryFile != ""
if isC {
var err error
if infos, err = parseBinOutput(req); err != nil {
return err
}
- } else {
- raw := req.result.Info
- for len(raw.Calls) != 0 {
- ncalls := min(len(raw.Calls), len(req.Prog.Calls))
- infos = append(infos, &flatrpc.ProgInfo{
- Extra: raw.Extra,
- Calls: raw.Calls[:ncalls],
- })
- raw.Calls = raw.Calls[ncalls:]
- }
- }
- if req.Repeat != len(infos) {
- return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
- req.Repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ if req.repeat != len(infos) {
+ return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
+ req.repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ }
}
calls := make(map[string]bool)
for run, info := range infos {
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index f04ad4b0f..92b6c2d77 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -8,41 +8,41 @@ import (
"context"
"encoding/binary"
"encoding/hex"
- "errors"
"flag"
"fmt"
- "os"
"path/filepath"
"runtime"
"testing"
- "time"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
_ "github.com/google/syzkaller/sys/test/gen" // pull in the test target
"github.com/stretchr/testify/assert"
)
-// Can be used as:
-// go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
-// to select a subset of tests to run.
-var flagFilter = flag.String("filter", "", "prefix to match test file names")
-
-var flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+var (
+ // Can be used as:
+ // go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
+ // to select a subset of tests to run.
+ flagFilter = flag.String("filter", "", "prefix to match test file names")
+ flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+ flagGDB = flag.Bool("gdb", false, "run executor under gdb")
+)
-func Test(t *testing.T) {
+func TestUnit(t *testing.T) {
switch runtime.GOOS {
case targets.OpenBSD:
t.Skipf("broken on %v", runtime.GOOS)
}
// Test only one target in short mode (each takes 5+ seconds to run).
- shortTarget := targets.Get(targets.TestOS, targets.TestArch64)
+ shortTarget := targets.Get(targets.TestOS, targets.TestArch64Fork)
for _, sysTarget := range targets.List[targets.TestOS] {
if testing.Short() && sysTarget != shortTarget {
continue
@@ -83,27 +83,7 @@ func test(t *testing.T, sysTarget *targets.Target) {
Verbose: true,
Debug: *flagDebug,
}
-
- executorCtx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
- go func() {
- for {
- select {
- case <-time.After(time.Millisecond):
- case <-executorCtx.Done():
- return
- }
- req := ctx.Next()
- if req == nil {
- continue
- }
- if req.BinaryFile != "" {
- req.Done(runTestC(req))
- } else {
- req.Done(runTest(req, executor))
- }
- }
- }()
+ startRpcserver(t, target, executor, ctx)
if err := ctx.Run(); err != nil {
t.Fatal(err)
}
@@ -114,7 +94,7 @@ func TestCover(t *testing.T) {
// We inject given blobs into KCOV buffer using syz_inject_cover,
// and then test what we get back.
t.Parallel()
- for _, arch := range []string{targets.TestArch32, targets.TestArch64} {
+ for _, arch := range []string{targets.TestArch32, targets.TestArch64, targets.TestArch64Fork} {
sysTarget := targets.Get(targets.TestOS, arch)
t.Run(arch, func(t *testing.T) {
if sysTarget.BrokenCompiler != "" {
@@ -202,15 +182,15 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover,
- Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
- 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011},
+ Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000033, 0xc0dec0dec0000022,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
},
{
Is64Bit: 1,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
- Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
+ Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011},
},
// Signal hashing.
{
@@ -218,8 +198,8 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
Flags: flatrpc.ExecFlagCollectSignal,
- Signal: []uint64{0xc0dec0dec0011001, 0xc0dec0dec0022003, 0xc0dec0dec00330f2,
- 0xc0dec0dec0044bf0, 0xc0dec0dec0011b01},
+ Signal: []uint64{0xc0dec0dec0011b01, 0xc0dec0dec0044bf0, 0xc0dec0dec00330f2,
+ 0xc0dec0dec0022003, 0xc0dec0dec0011001},
},
// Invalid non-kernel PCs must fail test execution.
{
@@ -296,38 +276,49 @@ func testCover(t *testing.T, target *prog.Target) {
// TODO: test max signal filtering and cover filter when syz-executor handles them.
}
executor := csource.BuildExecutor(t, target, "../../")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
for i, test := range tests {
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Parallel()
- testCover1(t, target, executor, test)
+ testCover1(t, target, test, source)
})
}
}
-func testCover1(t *testing.T, target *prog.Target, executor string, test CoverTest) {
+func testCover1(t *testing.T, target *prog.Target, test CoverTest, source *queue.PlainQueue) {
text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
p, err := target.Deserialize([]byte(text), prog.Strict)
if err != nil {
t.Fatal(err)
}
req := &queue.Request{
- Prog: p,
- Repeat: 1,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
- EnvFlags: flatrpc.ExecEnvSignal,
+ EnvFlags: flatrpc.ExecEnvSignal | flatrpc.ExecEnvSandboxNone,
ExecFlags: test.Flags,
},
}
- res := runTest(req, executor)
+ if test.Flags&flatrpc.ExecFlagCollectSignal != 0 {
+ req.ReturnAllSignal = []int{0}
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
if res.Err != nil || res.Info == nil || len(res.Info.Calls) != 1 || res.Info.Calls[0] == nil {
- t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ t.Fatalf("program execution failed: status=%v err=%v\n%s", res.Status, res.Err, res.Output)
}
call := res.Info.Calls[0]
var comps [][2]uint64
for _, cmp := range call.Comps {
comps = append(comps, [2]uint64{cmp.Op1, cmp.Op2})
}
+ if test.Cover == nil {
+ test.Cover = []uint64{}
+ }
+ if test.Signal == nil {
+ test.Signal = []uint64{}
+ }
assert.Equal(t, test.Cover, call.Cover)
assert.Equal(t, test.Signal, call.Signal)
// Comparisons are reordered and order does not matter, so compare without order.
@@ -361,72 +352,38 @@ func makeComps(comps ...Comparison) []byte {
return w.Bytes()
}
-func runTest(req *queue.Request, executor string) *queue.Result {
- cfg := new(ipc.Config)
- sysTarget := targets.Get(req.Prog.Target.OS, req.Prog.Target.Arch)
- cfg.UseForkServer = sysTarget.ExecutorUsesForkServer
- cfg.Timeouts = sysTarget.Timeouts(1)
- cfg.Executor = executor
- env, err := ipc.MakeEnv(cfg, 0)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create ipc env: %w", err),
- }
+func startRpcserver(t *testing.T, target *prog.Target, executor string, source queue.Source) {
+ ctx, done := context.WithCancel(context.Background())
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: target,
+ Debug: *flagDebug,
+ Features: flatrpc.FeatureSandboxNone,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ },
+ Procs: runtime.GOMAXPROCS(0),
+ Slowdown: 10, // to deflake slower tests
+ },
+ Executor: executor,
+ Dir: t.TempDir(),
+ Context: ctx,
+ GDB: *flagGDB,
}
- defer env.Close()
- ret := &queue.Result{Status: queue.Success}
- for run := 0; run < req.Repeat; run++ {
- if run%2 == 0 {
- // Recreate Env every few iterations, this allows to cover more paths.
- env.ForceRestart()
- }
- output, info, hanged, err := env.Exec(&req.ExecOpts, req.Prog)
- ret.Output = append(ret.Output, output...)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: failed to run: %w", run, err),
- }
- }
- if hanged {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: hanged", run),
- }
- }
- if run == 0 {
- ret.Info = info
- } else {
- ret.Info.Calls = append(ret.Info.Calls, info.Calls...)
- }
+ cfg.MachineChecked = func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ cfg.Cover = true
+ return source
}
- return ret
-}
-
-func runTestC(req *queue.Request) *queue.Result {
- tmpDir, err := os.MkdirTemp("", "syz-runtest")
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create temp dir: %w", err),
+ errc := make(chan error)
+ go func() {
+ errc <- rpcserver.RunLocal(cfg)
+ }()
+ t.Cleanup(func() {
+ done()
+ if err := <-errc; err != nil {
+ t.Fatal(err)
}
- }
- defer os.RemoveAll(tmpDir)
- cmd := osutil.Command(req.BinaryFile)
- cmd.Dir = tmpDir
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- res := &queue.Result{}
- res.Output, res.Err = osutil.Run(20*time.Second, cmd)
- var verr *osutil.VerboseError
- if errors.As(res.Err, &verr) {
- // The process can legitimately do something like exit_group(1).
- // So we ignore the error and rely on the rest of the checks (e.g. syscall return values).
- res.Err = nil
- res.Output = verr.Output
- }
- return res
+ })
}
func TestParsing(t *testing.T) {
diff --git a/pkg/vminfo/features.go b/pkg/vminfo/features.go
index 150b658fc..67969a37f 100644
--- a/pkg/vminfo/features.go
+++ b/pkg/vminfo/features.go
@@ -49,6 +49,10 @@ func (ctx *checkContext) startFeaturesCheck() {
testProg := ctx.target.DataMmapProg()
for feat := range flatrpc.EnumNamesFeature {
feat := feat
+ if ctx.cfg.Features&feat == 0 {
+ ctx.features <- featureResult{feat, "disabled by user"}
+ continue
+ }
go func() {
envFlags, execFlags := ctx.featureToFlags(feat)
req := &queue.Request{
@@ -106,7 +110,7 @@ func (ctx *checkContext) finishFeatures(featureInfos []*flatrpc.FeatureInfo) (Fe
feat.Reason = strings.TrimSpace(outputReplacer.Replace(feat.Reason))
features[res.id] = feat
}
- if feat := features[flatrpc.FeatureSandboxSetuid]; !feat.Enabled {
+ if feat := features[flatrpc.FeatureSandboxNone]; !feat.Enabled {
return features, fmt.Errorf("execution of simple program fails: %v", feat.Reason)
}
if feat := features[flatrpc.FeatureCoverage]; ctx.cfg.Cover && !feat.Enabled {
@@ -118,7 +122,7 @@ func (ctx *checkContext) finishFeatures(featureInfos []*flatrpc.FeatureInfo) (Fe
// featureToFlags creates ipc flags required to test the feature on a simple program.
// For features that has setup procedure in the executor, we just execute with the default flags.
func (ctx *checkContext) featureToFlags(feat flatrpc.Feature) (flatrpc.ExecEnv, flatrpc.ExecFlag) {
- envFlags := ctx.sandbox
+ envFlags := ctx.cfg.Sandbox
// These don't have a corresponding feature and are always enabled.
envFlags |= flatrpc.ExecEnvEnableCloseFds | flatrpc.ExecEnvEnableCgroups | flatrpc.ExecEnvEnableNetReset
execFlags := flatrpc.ExecFlagThreaded
@@ -135,12 +139,18 @@ func (ctx *checkContext) featureToFlags(feat flatrpc.Feature) (flatrpc.ExecEnv,
case flatrpc.FeatureDelayKcovMmap:
envFlags |= flatrpc.ExecEnvSignal | flatrpc.ExecEnvDelayKcovMmap
execFlags |= flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover
+ case flatrpc.FeatureSandboxNone:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxNone
case flatrpc.FeatureSandboxSetuid:
- // We use setuid sandbox feature to test that the simple program
- // succeeds under the actual sandbox (not necessary setuid).
- // We do this because we don't have a feature for sandbox 'none'.
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxSetuid
case flatrpc.FeatureSandboxNamespace:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxNamespace
case flatrpc.FeatureSandboxAndroid:
+ envFlags &= ^ctx.cfg.Sandbox
+ envFlags |= flatrpc.ExecEnvSandboxAndroid
case flatrpc.FeatureFault:
case flatrpc.FeatureLeak:
case flatrpc.FeatureNetInjection:
diff --git a/pkg/vminfo/syscalls.go b/pkg/vminfo/syscalls.go
index 8a533227b..178e5d52c 100644
--- a/pkg/vminfo/syscalls.go
+++ b/pkg/vminfo/syscalls.go
@@ -11,8 +11,6 @@ import (
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -31,9 +29,8 @@ import (
type checkContext struct {
ctx context.Context
impl checker
- cfg *mgrconfig.Config
+ cfg *Config
target *prog.Target
- sandbox flatrpc.ExecEnv
executor queue.Executor
fs filesystem
// Once checking of a syscall is finished, the result is sent to syscalls.
@@ -48,18 +45,12 @@ type syscallResult struct {
reason string
}
-func newCheckContext(ctx context.Context, cfg *mgrconfig.Config, impl checker,
- executor queue.Executor) *checkContext {
- sandbox, err := ipc.SandboxToFlags(cfg.Sandbox)
- if err != nil {
- panic(fmt.Sprintf("failed to parse sandbox: %v", err))
- }
+func newCheckContext(ctx context.Context, cfg *Config, impl checker, executor queue.Executor) *checkContext {
return &checkContext{
ctx: ctx,
impl: impl,
cfg: cfg,
target: cfg.Target,
- sandbox: sandbox,
executor: executor,
syscalls: make(chan syscallResult),
features: make(chan featureResult, 100),
@@ -67,6 +58,7 @@ func newCheckContext(ctx context.Context, cfg *mgrconfig.Config, impl checker,
}
func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
+ sysTarget := targets.Get(ctx.cfg.Target.OS, ctx.cfg.Target.Arch)
ctx.fs = createVirtualFilesystem(fileInfos)
for _, id := range ctx.cfg.Syscalls {
call := ctx.target.Syscalls[id]
@@ -82,12 +74,12 @@ func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
}
// HostFuzzer targets can't run Go binaries on the targets,
// so we actually run on the host on another OS. The same for targets.TestOS OS.
- if ctx.cfg.SysTarget.HostFuzzer || ctx.target.OS == targets.TestOS {
+ if sysTarget.HostFuzzer || ctx.target.OS == targets.TestOS {
syscallCheck = alwaysSupported
}
go func() {
var reason string
- deps := ctx.cfg.SysTarget.PseudoSyscallDeps[call.CallName]
+ deps := sysTarget.PseudoSyscallDeps[call.CallName]
if len(deps) != 0 {
reason = ctx.supportedSyscalls(deps)
}
@@ -215,14 +207,14 @@ func (ctx *checkContext) anyCallSucceeds(calls []string, msg string) string {
}
func (ctx *checkContext) onlySandboxNone() string {
- if ctx.sandbox != 0 {
+ if ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNone {
return "only supported under root with sandbox=none"
}
return ""
}
func (ctx *checkContext) onlySandboxNoneOrNamespace() string {
- if ctx.sandbox != 0 && ctx.sandbox != flatrpc.ExecEnvSandboxNamespace {
+ if ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNone && ctx.cfg.Sandbox != flatrpc.ExecEnvSandboxNamespace {
return "only supported under root with sandbox=none/namespace"
}
return ""
@@ -237,9 +229,9 @@ func (ctx *checkContext) val(name string) uint64 {
}
func (ctx *checkContext) execRaw(calls []string, mode prog.DeserializeMode, root bool) *flatrpc.ProgInfo {
- sandbox := ctx.sandbox
+ sandbox := ctx.cfg.Sandbox
if root {
- sandbox = 0
+ sandbox = flatrpc.ExecEnvSandboxNone
}
info := &flatrpc.ProgInfo{}
for remain := calls; len(remain) != 0; {
@@ -265,13 +257,9 @@ func (ctx *checkContext) execRaw(calls []string, mode prog.DeserializeMode, root
res := req.Wait(ctx.ctx)
if res.Status == queue.Success {
info.Calls = append(info.Calls, res.Info.Calls...)
- } else if res.Status == queue.Crashed {
+ } else {
// Pretend these calls were not executed.
info.Calls = append(info.Calls, flatrpc.EmptyProgInfo(ncalls).Calls...)
- } else {
- // The program must have been either executed or not due to a crash.
- panic(fmt.Sprintf("got unexpected execution status (%d) for the prog %s",
- res.Status, progStr))
}
}
if len(info.Calls) != len(calls) {
diff --git a/pkg/vminfo/vminfo.go b/pkg/vminfo/vminfo.go
index a880f2f5b..b65baaac8 100644
--- a/pkg/vminfo/vminfo.go
+++ b/pkg/vminfo/vminfo.go
@@ -25,7 +25,6 @@ import (
"github.com/google/syzkaller/pkg/cover"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -36,9 +35,21 @@ type Checker struct {
checkContext *checkContext
}
-func New(cfg *mgrconfig.Config) *Checker {
+type Config struct {
+ Target *prog.Target
+ // Set of features to check, missing features won't be checked/enabled after Run.
+ Features flatrpc.Feature
+ // Set of syscalls to check.
+ Syscalls []int
+ Debug bool
+ Cover bool
+ Sandbox flatrpc.ExecEnv
+ SandboxArg int64
+}
+
+func New(cfg *Config) *Checker {
var impl checker
- switch cfg.TargetOS {
+ switch cfg.Target.OS {
case targets.Linux:
impl = new(linux)
case targets.NetBSD:
diff --git a/pkg/vminfo/vminfo_test.go b/pkg/vminfo/vminfo_test.go
index f58e3f7e5..2be23ca66 100644
--- a/pkg/vminfo/vminfo_test.go
+++ b/pkg/vminfo/vminfo_test.go
@@ -4,6 +4,8 @@
package vminfo
import (
+ "os"
+ "path/filepath"
"runtime"
"strings"
"testing"
@@ -11,9 +13,6 @@ import (
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/host"
- "github.com/google/syzkaller/pkg/ipc"
- "github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
)
@@ -121,29 +120,62 @@ func createSuccessfulResults(source queue.Source, stop chan struct{}) {
func hostChecker(t *testing.T) (*Checker, []*flatrpc.FileInfo) {
cfg := testConfig(t, runtime.GOOS, runtime.GOARCH)
checker := New(cfg)
- files := host.ReadFiles(checker.RequiredFiles())
+ files := readFiles(checker.RequiredFiles())
return checker, files
}
-func testConfig(t *testing.T, OS, arch string) *mgrconfig.Config {
+func testConfig(t *testing.T, OS, arch string) *Config {
target, err := prog.GetTarget(OS, arch)
if err != nil {
t.Fatal(err)
}
- cfg := &mgrconfig.Config{
- Sandbox: ipc.FlagsToSandbox(0),
- Derived: mgrconfig.Derived{
- TargetOS: OS,
- TargetArch: arch,
- TargetVMArch: arch,
- Target: target,
- SysTarget: targets.Get(OS, arch),
- },
- }
+ var syscalls []int
for id := range target.Syscalls {
if !target.Syscalls[id].Attrs.Disabled {
- cfg.Syscalls = append(cfg.Syscalls, id)
+ syscalls = append(syscalls, id)
+ }
+ }
+ return &Config{
+ Target: target,
+ Features: flatrpc.AllFeatures,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ Syscalls: syscalls,
+ }
+}
+
+func readFiles(files []string) []*flatrpc.FileInfo {
+ var res []*flatrpc.FileInfo
+ for _, glob := range files {
+ glob = filepath.FromSlash(glob)
+ if !strings.Contains(glob, "*") {
+ res = append(res, readFile(glob))
+ continue
+ }
+ matches, err := filepath.Glob(glob)
+ if err != nil {
+ res = append(res, &flatrpc.FileInfo{
+ Name: glob,
+ Error: err.Error(),
+ })
+ continue
}
+ for _, file := range matches {
+ res = append(res, readFile(file))
+ }
+ }
+ return res
+}
+
+func readFile(file string) *flatrpc.FileInfo {
+ data, err := os.ReadFile(file)
+ exists, errStr := true, ""
+ if err != nil {
+ exists, errStr = !os.IsNotExist(err), err.Error()
+ }
+ return &flatrpc.FileInfo{
+ Name: file,
+ Exists: exists,
+ Error: errStr,
+ Data: data,
}
- return cfg
}