about summary refs log tree commit diff stats
path: root/pkg/runtest
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-06-04 12:55:41 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-06-24 09:57:34 +0000
commite16e2c9a4cb6937323e861b646792a6c4c978a3c (patch)
tree6c513e98e5f465b44a98546d8984485d2c128582 /pkg/runtest
parent90d67044dab68568e8f35bc14b68055dbd166eff (diff)
executor: add runner mode
Move all syz-fuzzer logic into syz-executor and remove syz-fuzzer. Also restore syz-runtest functionality in the manager. Update #4917 (sets most signal handlers to SIG_IGN)
Diffstat (limited to 'pkg/runtest')
-rw-r--r--pkg/runtest/executor_test.go131
-rw-r--r--pkg/runtest/run.go253
-rw-r--r--pkg/runtest/run_test.go175
3 files changed, 320 insertions, 239 deletions
diff --git a/pkg/runtest/executor_test.go b/pkg/runtest/executor_test.go
new file mode 100644
index 000000000..d6f9a8434
--- /dev/null
+++ b/pkg/runtest/executor_test.go
@@ -0,0 +1,131 @@
+// Copyright 2015 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package runtest
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/pkg/csource"
+ "github.com/google/syzkaller/pkg/flatrpc"
+ "github.com/google/syzkaller/pkg/fuzzer/queue"
+ "github.com/google/syzkaller/pkg/image"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/prog"
+ _ "github.com/google/syzkaller/sys"
+ "github.com/google/syzkaller/sys/targets"
+)
+
+// TestExecutor runs all internal executor unit tests.
+// We do it here because we already build executor binary here.
+func TestExecutor(t *testing.T) {
+ t.Parallel()
+ for _, sysTarget := range targets.List[runtime.GOOS] {
+ sysTarget := targets.Get(runtime.GOOS, sysTarget.Arch)
+ t.Run(sysTarget.Arch, func(t *testing.T) {
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ t.Parallel()
+ target, err := prog.GetTarget(runtime.GOOS, sysTarget.Arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bin := csource.BuildExecutor(t, target, "../..")
+ // qemu-user may allow us to run some cross-arch binaries.
+ if _, err := osutil.RunCmd(time.Minute, "", bin, "test"); err != nil {
+ if sysTarget.Arch == runtime.GOARCH || sysTarget.VMArch == runtime.GOARCH {
+ t.Fatal(err)
+ }
+ t.Skipf("skipping, cross-arch binary failed: %v", err)
+ }
+ })
+ }
+}
+
+func TestZlib(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget(targets.TestOS, targets.TestArch64)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ r := rand.New(testutil.RandSource(t))
+ for i := 0; i < 10; i++ {
+ data := testutil.RandMountImage(r)
+ compressed := image.Compress(data)
+ text := fmt.Sprintf(`syz_compare_zlib(&(0x7f0000000000)="$%s", AUTO, &(0x7f0000800000)="$%s", AUTO)`,
+ image.EncodeB64(data), image.EncodeB64(compressed))
+ p, err := target.Deserialize([]byte(text), prog.Strict)
+ if err != nil {
+ t.Fatalf("failed to deserialize empty program: %v", err)
+ }
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if res.Info.Calls[0].Error != 0 {
+ t.Fatalf("data comparison failed: %v\n%s", res.Info.Calls[0].Error, res.Output)
+ }
+ }
+}
+
+func TestExecutorCommonExt(t *testing.T) {
+ t.Parallel()
+ target, err := prog.GetTarget("test", "64_fork")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sysTarget := targets.Get(target.OS, target.Arch)
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping, broken cross-compiler: %v", sysTarget.BrokenCompiler)
+ }
+ executor := csource.BuildExecutor(t, target, "../..", "-DSYZ_TEST_COMMON_EXT_EXAMPLE=1")
+ // The example setup_ext_test does:
+ // *(uint64*)(SYZ_DATA_OFFSET + 0x1234) = 0xbadc0ffee;
+ // The following program tests that that value is present at 0x1234.
+ test := `syz_compare(&(0x7f0000001234)="", 0x8, &(0x7f0000000000)=@blob="eeffc0ad0b000000", AUTO)`
+ p, err := target.Deserialize([]byte(test), prog.Strict)
+ if err != nil {
+ t.Fatal(err)
+ }
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
+ req := &queue.Request{
+ Prog: p,
+ ReturnError: true,
+ ReturnOutput: true,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone,
+ },
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
+ if res.Err != nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ if call := res.Info.Calls[0]; call.Flags&flatrpc.CallFlagFinished == 0 || call.Error != 0 {
+ t.Fatalf("bad call result: flags=%x errno=%v", call.Flags, call.Error)
+ }
+}
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index eb57582a3..cef85f6e9 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -22,25 +22,25 @@ import (
"sort"
"strconv"
"strings"
- "sync"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
- "golang.org/x/sync/errgroup"
)
type runRequest struct {
*queue.Request
+ sourceOpts *csource.Options
+ executor queue.Executor
ok int
failed int
err error
result *queue.Result
results *flatrpc.ProgInfo // the expected results
+ repeat int // only relevant for C tests
name string
broken string
@@ -58,7 +58,9 @@ type Context struct {
Debug bool
Tests string // prefix to match test file names
- executor queue.PlainQueue
+ executor *queue.DynamicOrderer
+ requests []*runRequest
+ buildSem chan bool
}
func (ctx *Context) log(msg string, args ...interface{}) {
@@ -66,87 +68,11 @@ func (ctx *Context) log(msg string, args ...interface{}) {
}
func (ctx *Context) Run() error {
- if ctx.Retries%2 == 0 {
- ctx.Retries++
- }
- progs := make(chan *runRequest, 1000)
- var eg errgroup.Group
- eg.Go(func() error {
- defer close(progs)
- return ctx.generatePrograms(progs)
- })
- done := make(chan *runRequest)
- eg.Go(func() error {
- return ctx.processResults(done)
- })
-
- var wg sync.WaitGroup
- for req := range progs {
- req := req
- if req.broken != "" || req.skip != "" {
- done <- req
- continue
- }
- var retry queue.DoneCallback
- retry = func(_ *queue.Request, res *queue.Result) bool {
- // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
- // We don't want to fix this by significantly bumping all timeouts,
- // because if a program fails all the time with the default timeouts,
- // it will also fail during fuzzing. And we want to ensure that it's not the case.
- // So what we want is to tolerate episodic failures with the default timeouts.
- // To achieve this we run each test several times and ensure that it passes
- // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
- // In the best case this allows to get off with just 1 test run.
-
- if res.Err != nil {
- req.err = res.Err
- return true
- }
- req.result = res
- err := checkResult(req)
- if err == nil {
- req.ok++
- } else {
- req.failed++
- req.err = err
- }
- if req.ok > req.failed {
- // There are more successful than failed runs.
- req.err = nil
- return true
- }
- // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
- // which simplifies to `failed * 2 + 1`.
- if req.failed*2+1 <= ctx.Retries {
- // We can still retry the execution.
- req.OnDone(retry)
- ctx.executor.Submit(req.Request)
- return false
- }
- // Give up and fail on this request.
- return true
- }
- req.Request.OnDone(retry)
- ctx.executor.Submit(req.Request)
- wg.Add(1)
- go func() {
- defer wg.Done()
- req.Request.Wait(context.Background())
- done <- req
- }()
- }
- wg.Wait()
- close(done)
- return eg.Wait()
-}
-
-func (ctx *Context) Next() *queue.Request {
- return ctx.executor.Next()
-}
-
-func (ctx *Context) processResults(requests chan *runRequest) error {
+ ctx.buildSem = make(chan bool, runtime.GOMAXPROCS(0))
+ ctx.executor = queue.DynamicOrder()
+ ctx.generatePrograms()
var ok, fail, broken, skip int
- for req := range requests {
+ for _, req := range ctx.requests {
result := ""
verbose := false
if req.broken != "" {
@@ -158,14 +84,14 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
result = fmt.Sprintf("SKIP (%v)", req.skip)
verbose = true
} else {
+ req.Request.Wait(context.Background())
if req.err != nil {
fail++
result = fmt.Sprintf("FAIL: %v",
strings.Replace(req.err.Error(), "\n", "\n\t", -1))
- res := req.result
- if len(res.Output) != 0 {
+ if req.result != nil && len(req.result.Output) != 0 {
result += fmt.Sprintf("\n\t%s",
- strings.Replace(string(res.Output), "\n", "\n\t", -1))
+ strings.Replace(string(req.result.Output), "\n", "\n\t", -1))
}
} else {
ok++
@@ -186,7 +112,52 @@ func (ctx *Context) processResults(requests chan *runRequest) error {
return nil
}
-func (ctx *Context) generatePrograms(progs chan *runRequest) error {
+func (ctx *Context) Next() *queue.Request {
+ return ctx.executor.Next()
+}
+
+func (ctx *Context) onDone(req *runRequest, res *queue.Result) bool {
+ // The tests depend on timings and may be flaky, esp on overloaded/slow machines.
+ // We don't want to fix this by significantly bumping all timeouts,
+ // because if a program fails all the time with the default timeouts,
+ // it will also fail during fuzzing. And we want to ensure that it's not the case.
+ // So what we want is to tolerate episodic failures with the default timeouts.
+ // To achieve this we run each test several times and ensure that it passes
+ // in 50+% of cases (i.e. 1/1, 2/3, 3/5, 4/7, etc).
+ // In the best case this allows to get off with just 1 test run.
+ if res.Err != nil {
+ req.err = res.Err
+ return true
+ }
+ req.result = res
+ err := checkResult(req)
+ if err == nil {
+ req.ok++
+ } else {
+ req.failed++
+ req.err = err
+ }
+ if req.ok > req.failed {
+ // There are more successful than failed runs.
+ req.err = nil
+ return true
+ }
+ // We need at least `failed - ok + 1` more runs <=> `failed + ok + need` in total,
+ // which simplifies to `failed * 2 + 1`.
+ retries := ctx.Retries
+ if retries%2 == 0 {
+ retries++
+ }
+ if req.failed*2+1 <= retries {
+ // We can still retry the execution.
+ ctx.submit(req)
+ return false
+ }
+ // Give up and fail on this request.
+ return true
+}
+
+func (ctx *Context) generatePrograms() error {
cover := []bool{false}
if ctx.Features&flatrpc.FeatureCoverage != 0 {
cover = append(cover, true)
@@ -201,7 +172,7 @@ func (ctx *Context) generatePrograms(progs chan *runRequest) error {
return err
}
for _, file := range files {
- if err := ctx.generateFile(progs, sandboxes, cover, file); err != nil {
+ if err := ctx.generateFile(sandboxes, cover, file); err != nil {
return err
}
}
@@ -225,7 +196,7 @@ func progFileList(dir, filter string) ([]string, error) {
return res, nil
}
-func (ctx *Context) generateFile(progs chan *runRequest, sandboxes []string, cover []bool, filename string) error {
+func (ctx *Context) generateFile(sandboxes []string, cover []bool, filename string) error {
p, requires, results, err := parseProg(ctx.Target, ctx.Dir, filename)
if err != nil {
return err
@@ -239,10 +210,10 @@ nextSandbox:
name := fmt.Sprintf("%v %v", filename, sandbox)
for _, call := range p.Calls {
if !ctx.EnabledCalls[sandbox][call.Meta] {
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
skip: fmt.Sprintf("unsupported call %v", call.Meta.Name),
- }
+ })
continue nextSandbox
}
}
@@ -267,6 +238,9 @@ nextSandbox:
if sandbox == "" {
break // executor does not support empty sandbox
}
+ if times != 1 {
+ break
+ }
name := name
if cov {
name += "/cover"
@@ -274,11 +248,11 @@ nextSandbox:
properties["cover"] = cov
properties["C"] = false
properties["executor"] = true
- req, err := ctx.createSyzTest(p, sandbox, threaded, cov, times)
+ req, err := ctx.createSyzTest(p, sandbox, threaded, cov)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
if sysTarget.HostFuzzer {
// For HostFuzzer mode, we need to cross-compile
@@ -291,17 +265,17 @@ nextSandbox:
name += " C"
if !sysTarget.ExecutorUsesForkServer && times > 1 {
// Non-fork loop implementation does not support repetition.
- progs <- &runRequest{
+ ctx.createTest(&runRequest{
name: name,
broken: "non-forking loop",
- }
+ })
continue
}
req, err := ctx.createCTest(p, sandbox, threaded, times)
if err != nil {
return err
}
- ctx.produceTest(progs, req, name, properties, requires, results)
+ ctx.produceTest(req, name, properties, requires, results)
}
}
}
@@ -405,14 +379,52 @@ func checkArch(requires map[string]bool, arch string) bool {
return true
}
-func (ctx *Context) produceTest(progs chan *runRequest, req *runRequest, name string,
- properties, requires map[string]bool, results *flatrpc.ProgInfo) {
+func (ctx *Context) produceTest(req *runRequest, name string, properties,
+ requires map[string]bool, results *flatrpc.ProgInfo) {
req.name = name
req.results = results
if !match(properties, requires) {
req.skip = "excluded by constraints"
}
- progs <- req
+ ctx.createTest(req)
+}
+
+func (ctx *Context) createTest(req *runRequest) {
+ req.executor = ctx.executor.Append()
+ ctx.requests = append(ctx.requests, req)
+ if req.skip != "" || req.broken != "" {
+ return
+ }
+ if req.sourceOpts == nil {
+ ctx.submit(req)
+ return
+ }
+ go func() {
+ ctx.buildSem <- true
+ defer func() {
+ <-ctx.buildSem
+ }()
+ src, err := csource.Write(req.Prog, *req.sourceOpts)
+ if err != nil {
+ req.err = fmt.Errorf("failed to create C source: %w", err)
+ req.Request.Done(&queue.Result{})
+ }
+ bin, err := csource.Build(ctx.Target, src)
+ if err != nil {
+ req.err = fmt.Errorf("failed to build C program: %w", err)
+ req.Request.Done(&queue.Result{})
+ return
+ }
+ req.BinaryFile = bin
+ ctx.submit(req)
+ }()
+}
+
+func (ctx *Context) submit(req *runRequest) {
+ req.OnDone(func(_ *queue.Request, res *queue.Result) bool {
+ return ctx.onDone(req, res)
+ })
+ req.executor.Submit(req.Request)
}
func match(props, requires map[string]bool) bool {
@@ -436,9 +448,9 @@ func match(props, requires map[string]bool) bool {
return true
}
-func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool, times int) (*runRequest, error) {
+func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bool) (*runRequest, error) {
var opts flatrpc.ExecOpts
- sandboxFlags, err := ipc.SandboxToFlags(sandbox)
+ sandboxFlags, err := flatrpc.SandboxToFlags(sandbox)
if err != nil {
return nil, err
}
@@ -451,7 +463,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
opts.ExecFlags |= flatrpc.ExecFlagCollectSignal
opts.ExecFlags |= flatrpc.ExecFlagCollectCover
}
- opts.EnvFlags |= ipc.FeaturesToFlags(ctx.Features, nil)
+ opts.EnvFlags |= csource.FeaturesToFlags(ctx.Features, nil)
if ctx.Debug {
opts.EnvFlags |= flatrpc.ExecEnvDebug
}
@@ -459,7 +471,6 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
Request: &queue.Request{
Prog: p,
ExecOpts: opts,
- Repeat: times,
},
}
return req, nil
@@ -496,27 +507,19 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
opts.IEEE802154 = true
}
}
- src, err := csource.Write(p, opts)
- if err != nil {
- return nil, fmt.Errorf("failed to create C source: %w", err)
- }
- bin, err := csource.Build(p.Target, src)
- if err != nil {
- return nil, fmt.Errorf("failed to build C program: %w", err)
- }
var ipcFlags flatrpc.ExecFlag
if threaded {
ipcFlags |= flatrpc.ExecFlagThreaded
}
req := &runRequest{
+ sourceOpts: &opts,
Request: &queue.Request{
- Prog: p,
- BinaryFile: bin,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
ExecFlags: ipcFlags,
},
- Repeat: times,
},
+ repeat: times,
}
return req, nil
}
@@ -525,27 +528,17 @@ func checkResult(req *runRequest) error {
if req.result.Status != queue.Success {
return fmt.Errorf("non-successful result status (%v)", req.result.Status)
}
- var infos []*flatrpc.ProgInfo
+ infos := []*flatrpc.ProgInfo{req.result.Info}
isC := req.BinaryFile != ""
if isC {
var err error
if infos, err = parseBinOutput(req); err != nil {
return err
}
- } else {
- raw := req.result.Info
- for len(raw.Calls) != 0 {
- ncalls := min(len(raw.Calls), len(req.Prog.Calls))
- infos = append(infos, &flatrpc.ProgInfo{
- Extra: raw.Extra,
- Calls: raw.Calls[:ncalls],
- })
- raw.Calls = raw.Calls[ncalls:]
- }
- }
- if req.Repeat != len(infos) {
- return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
- req.Repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ if req.repeat != len(infos) {
+ return fmt.Errorf("should repeat %v times, but repeated %v, prog calls %v, info calls %v\n%s",
+ req.repeat, len(infos), req.Prog.Calls, len(req.result.Info.Calls), req.result.Output)
+ }
}
calls := make(map[string]bool)
for run, info := range infos {
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index f04ad4b0f..92b6c2d77 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -8,41 +8,41 @@ import (
"context"
"encoding/binary"
"encoding/hex"
- "errors"
"flag"
"fmt"
- "os"
"path/filepath"
"runtime"
"testing"
- "time"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
- "github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/pkg/rpcserver"
"github.com/google/syzkaller/pkg/testutil"
+ "github.com/google/syzkaller/pkg/vminfo"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
_ "github.com/google/syzkaller/sys/test/gen" // pull in the test target
"github.com/stretchr/testify/assert"
)
-// Can be used as:
-// go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
-// to select a subset of tests to run.
-var flagFilter = flag.String("filter", "", "prefix to match test file names")
-
-var flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+var (
+ // Can be used as:
+ // go test -v -run=Test/64_fork ./pkg/runtest -filter=nonfailing
+ // to select a subset of tests to run.
+ flagFilter = flag.String("filter", "", "prefix to match test file names")
+ flagDebug = flag.Bool("debug", false, "include debug output from the executor")
+ flagGDB = flag.Bool("gdb", false, "run executor under gdb")
+)
-func Test(t *testing.T) {
+func TestUnit(t *testing.T) {
switch runtime.GOOS {
case targets.OpenBSD:
t.Skipf("broken on %v", runtime.GOOS)
}
// Test only one target in short mode (each takes 5+ seconds to run).
- shortTarget := targets.Get(targets.TestOS, targets.TestArch64)
+ shortTarget := targets.Get(targets.TestOS, targets.TestArch64Fork)
for _, sysTarget := range targets.List[targets.TestOS] {
if testing.Short() && sysTarget != shortTarget {
continue
@@ -83,27 +83,7 @@ func test(t *testing.T, sysTarget *targets.Target) {
Verbose: true,
Debug: *flagDebug,
}
-
- executorCtx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
- go func() {
- for {
- select {
- case <-time.After(time.Millisecond):
- case <-executorCtx.Done():
- return
- }
- req := ctx.Next()
- if req == nil {
- continue
- }
- if req.BinaryFile != "" {
- req.Done(runTestC(req))
- } else {
- req.Done(runTest(req, executor))
- }
- }
- }()
+ startRpcserver(t, target, executor, ctx)
if err := ctx.Run(); err != nil {
t.Fatal(err)
}
@@ -114,7 +94,7 @@ func TestCover(t *testing.T) {
// We inject given blobs into KCOV buffer using syz_inject_cover,
// and then test what we get back.
t.Parallel()
- for _, arch := range []string{targets.TestArch32, targets.TestArch64} {
+ for _, arch := range []string{targets.TestArch32, targets.TestArch64, targets.TestArch64Fork} {
sysTarget := targets.Get(targets.TestOS, arch)
t.Run(arch, func(t *testing.T) {
if sysTarget.BrokenCompiler != "" {
@@ -202,15 +182,15 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover,
- Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
- 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011},
+ Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000033, 0xc0dec0dec0000022,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
},
{
Is64Bit: 1,
Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
- Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
+ Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011},
},
// Signal hashing.
{
@@ -218,8 +198,8 @@ func testCover(t *testing.T, target *prog.Target) {
Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
Flags: flatrpc.ExecFlagCollectSignal,
- Signal: []uint64{0xc0dec0dec0011001, 0xc0dec0dec0022003, 0xc0dec0dec00330f2,
- 0xc0dec0dec0044bf0, 0xc0dec0dec0011b01},
+ Signal: []uint64{0xc0dec0dec0011b01, 0xc0dec0dec0044bf0, 0xc0dec0dec00330f2,
+ 0xc0dec0dec0022003, 0xc0dec0dec0011001},
},
// Invalid non-kernel PCs must fail test execution.
{
@@ -296,38 +276,49 @@ func testCover(t *testing.T, target *prog.Target) {
// TODO: test max signal filtering and cover filter when syz-executor handles them.
}
executor := csource.BuildExecutor(t, target, "../../")
+ source := queue.Plain()
+ startRpcserver(t, target, executor, source)
for i, test := range tests {
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Parallel()
- testCover1(t, target, executor, test)
+ testCover1(t, target, test, source)
})
}
}
-func testCover1(t *testing.T, target *prog.Target, executor string, test CoverTest) {
+func testCover1(t *testing.T, target *prog.Target, test CoverTest, source *queue.PlainQueue) {
text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
p, err := target.Deserialize([]byte(text), prog.Strict)
if err != nil {
t.Fatal(err)
}
req := &queue.Request{
- Prog: p,
- Repeat: 1,
+ Prog: p,
ExecOpts: flatrpc.ExecOpts{
- EnvFlags: flatrpc.ExecEnvSignal,
+ EnvFlags: flatrpc.ExecEnvSignal | flatrpc.ExecEnvSandboxNone,
ExecFlags: test.Flags,
},
}
- res := runTest(req, executor)
+ if test.Flags&flatrpc.ExecFlagCollectSignal != 0 {
+ req.ReturnAllSignal = []int{0}
+ }
+ source.Submit(req)
+ res := req.Wait(context.Background())
if res.Err != nil || res.Info == nil || len(res.Info.Calls) != 1 || res.Info.Calls[0] == nil {
- t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ t.Fatalf("program execution failed: status=%v err=%v\n%s", res.Status, res.Err, res.Output)
}
call := res.Info.Calls[0]
var comps [][2]uint64
for _, cmp := range call.Comps {
comps = append(comps, [2]uint64{cmp.Op1, cmp.Op2})
}
+ if test.Cover == nil {
+ test.Cover = []uint64{}
+ }
+ if test.Signal == nil {
+ test.Signal = []uint64{}
+ }
assert.Equal(t, test.Cover, call.Cover)
assert.Equal(t, test.Signal, call.Signal)
// Comparisons are reordered and order does not matter, so compare without order.
@@ -361,72 +352,38 @@ func makeComps(comps ...Comparison) []byte {
return w.Bytes()
}
-func runTest(req *queue.Request, executor string) *queue.Result {
- cfg := new(ipc.Config)
- sysTarget := targets.Get(req.Prog.Target.OS, req.Prog.Target.Arch)
- cfg.UseForkServer = sysTarget.ExecutorUsesForkServer
- cfg.Timeouts = sysTarget.Timeouts(1)
- cfg.Executor = executor
- env, err := ipc.MakeEnv(cfg, 0)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create ipc env: %w", err),
- }
+func startRpcserver(t *testing.T, target *prog.Target, executor string, source queue.Source) {
+ ctx, done := context.WithCancel(context.Background())
+ cfg := &rpcserver.LocalConfig{
+ Config: rpcserver.Config{
+ Config: vminfo.Config{
+ Target: target,
+ Debug: *flagDebug,
+ Features: flatrpc.FeatureSandboxNone,
+ Sandbox: flatrpc.ExecEnvSandboxNone,
+ },
+ Procs: runtime.GOMAXPROCS(0),
+ Slowdown: 10, // to deflake slower tests
+ },
+ Executor: executor,
+ Dir: t.TempDir(),
+ Context: ctx,
+ GDB: *flagGDB,
}
- defer env.Close()
- ret := &queue.Result{Status: queue.Success}
- for run := 0; run < req.Repeat; run++ {
- if run%2 == 0 {
- // Recreate Env every few iterations, this allows to cover more paths.
- env.ForceRestart()
- }
- output, info, hanged, err := env.Exec(&req.ExecOpts, req.Prog)
- ret.Output = append(ret.Output, output...)
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: failed to run: %w", run, err),
- }
- }
- if hanged {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("run %v: hanged", run),
- }
- }
- if run == 0 {
- ret.Info = info
- } else {
- ret.Info.Calls = append(ret.Info.Calls, info.Calls...)
- }
+ cfg.MachineChecked = func(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ cfg.Cover = true
+ return source
}
- return ret
-}
-
-func runTestC(req *queue.Request) *queue.Result {
- tmpDir, err := os.MkdirTemp("", "syz-runtest")
- if err != nil {
- return &queue.Result{
- Status: queue.ExecFailure,
- Err: fmt.Errorf("failed to create temp dir: %w", err),
+ errc := make(chan error)
+ go func() {
+ errc <- rpcserver.RunLocal(cfg)
+ }()
+ t.Cleanup(func() {
+ done()
+ if err := <-errc; err != nil {
+ t.Fatal(err)
}
- }
- defer os.RemoveAll(tmpDir)
- cmd := osutil.Command(req.BinaryFile)
- cmd.Dir = tmpDir
- // Tell ASAN to not mess with our NONFAILING.
- cmd.Env = append(append([]string{}, os.Environ()...), "ASAN_OPTIONS=handle_segv=0 allow_user_segv_handler=1")
- res := &queue.Result{}
- res.Output, res.Err = osutil.Run(20*time.Second, cmd)
- var verr *osutil.VerboseError
- if errors.As(res.Err, &verr) {
- // The process can legitimately do something like exit_group(1).
- // So we ignore the error and rely on the rest of the checks (e.g. syscall return values).
- res.Err = nil
- res.Output = verr.Output
- }
- return res
+ })
}
func TestParsing(t *testing.T) {