aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2024-06-10 11:06:30 +0200
committerDmitry Vyukov <dvyukov@google.com>2024-06-11 05:18:24 +0000
commit5f02070655b3c1f2ab50a82fd5f466aaeb7af44a (patch)
tree6cd193271272ef3357a2be86af98d1edf7889b73
parentb7d9eb04f4c510213e29f46db7eab4ec5c72a4ae (diff)
executor: add end-to-end coverage/signal/comparisons test
-rw-r--r--executor/common_test.h11
-rw-r--r--executor/executor.cc20
-rw-r--r--executor/executor_bsd.h5
-rw-r--r--executor/executor_darwin.h5
-rw-r--r--executor/executor_linux.h11
-rw-r--r--executor/executor_test.h25
-rw-r--r--executor/nocover.h5
-rw-r--r--pkg/runtest/run_test.go249
-rw-r--r--sys/targets/targets.go4
-rw-r--r--sys/test/exec.txt3
10 files changed, 321 insertions, 17 deletions
diff --git a/executor/common_test.h b/executor/common_test.h
index 971108df8..67585be9a 100644
--- a/executor/common_test.h
+++ b/executor/common_test.h
@@ -153,3 +153,14 @@ static long syz_test_fuzzer1(volatile long a, volatile long b, volatile long c)
}
#endif
+
+#if SYZ_EXECUTOR || __NR_syz_inject_cover
+static long syz_inject_cover(volatile long a, volatile long b, volatile long c)
+#if SYZ_EXECUTOR
+ ; // defined in executor_test.h
+#else
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/executor/executor.cc b/executor/executor.cc
index 88a0963c8..a8cc2259f 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -218,8 +218,8 @@ const uint64 binary_format_stroct = 4;
const uint64 no_copyout = -1;
static int running;
-uint32 completed;
-bool is_kernel_64_bit = true;
+static uint32 completed;
+static bool is_kernel_64_bit = true;
static uint8* input_data;
@@ -1278,11 +1278,14 @@ void execute_call(thread_t* th)
static uint32 hash(uint32 a)
{
+ // For test OS we disable hashing for determinism and testability.
+#if !GOOS_test
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
+#endif
return a;
}
@@ -1577,21 +1580,12 @@ bool kcov_comparison_t::ignore() const
return true;
if (arg2 >= out_start && arg2 <= out_end)
return true;
-#if defined(GOOS_linux)
// Filter out kernel physical memory addresses.
// These are internal kernel comparisons and should not be interesting.
- // The range covers first 1TB of physical mapping.
- uint64 kmem_start = (uint64)0xffff880000000000ull;
- uint64 kmem_end = (uint64)0xffff890000000000ull;
- bool kptr1 = arg1 >= kmem_start && arg1 <= kmem_end;
- bool kptr2 = arg2 >= kmem_start && arg2 <= kmem_end;
+ bool kptr1 = is_kernel_data(arg1) || arg1 == 0;
+ bool kptr2 = is_kernel_data(arg2) || arg2 == 0;
if (kptr1 && kptr2)
return true;
- if (kptr1 && arg2 == 0)
- return true;
- if (kptr2 && arg1 == 0)
- return true;
-#endif
}
return !coverage_filter(pc);
}
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index 43c2a19a9..1f54e0f41 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -179,6 +179,11 @@ static void cover_collect(cover_t* cov)
cov->size = *(uint64*)cov->data;
}
+static bool is_kernel_data(uint64 addr)
+{
+ return false;
+}
+
static bool use_cover_edges(uint64 pc)
{
return true;
diff --git a/executor/executor_darwin.h b/executor/executor_darwin.h
index aeba30a1d..83fe90c45 100644
--- a/executor/executor_darwin.h
+++ b/executor/executor_darwin.h
@@ -122,6 +122,11 @@ static void cover_collect(cover_t* cov)
cov->pc_offset = trace->offset;
}
+static bool is_kernel_data(uint64 addr)
+{
+ return false;
+}
+
static bool use_cover_edges(uint64 pc)
{
return true;
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index bfd81776f..445a278e2 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -182,6 +182,17 @@ static bool use_cover_edges(uint32 pc)
return true;
}
+static bool is_kernel_data(uint64 addr)
+{
+#if GOARCH_386 || GOARCH_amd64
+ // This range corresponds to the first 1TB of the physical memory mapping,
+ // see Documentation/arch/x86/x86_64/mm.rst.
+ return addr >= 0xffff880000000000ull && addr < 0xffff890000000000ull;
+#else
+ return false;
+#endif
+}
+
static bool use_cover_edges(uint64 pc)
{
#if GOARCH_amd64 || GOARCH_arm64
diff --git a/executor/executor_test.h b/executor/executor_test.h
index dd133e422..f796aed04 100644
--- a/executor/executor_test.h
+++ b/executor/executor_test.h
@@ -54,12 +54,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
static void cover_reset(cover_t* cov)
{
- *(unsigned long*)(cov->data) = 0;
+ *(uint64*)(cov->data) = 0;
}
static void cover_collect(cover_t* cov)
{
- cov->size = *(unsigned long*)(cov->data);
+ if (is_kernel_64_bit)
+ cov->size = *(uint64*)cov->data;
+ else
+ cov->size = *(uint32*)cov->data;
}
static void cover_protect(cover_t* cov)
@@ -87,7 +90,25 @@ static void cover_unprotect(cover_t* cov)
{
}
+static bool is_kernel_data(uint64 addr)
+{
+ return addr >= 0xda1a0000 && addr <= 0xda1a1000;
+}
+
static bool use_cover_edges(uint64 pc)
{
return true;
}
+
+static long syz_inject_cover(volatile long a, volatile long b, volatile long c)
+{
+ cover_t* cov = &current_thread->cov;
+ if (cov->data == nullptr)
+ return ENOENT;
+ is_kernel_64_bit = a;
+ cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+ uint32 size = std::min((uint32)c, cov->mmap_alloc_size);
+ memcpy(cov->data, (void*)b, size);
+ memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
+ return 0;
+}
diff --git a/executor/nocover.h b/executor/nocover.h
index 0ba7a56cc..ba26dd1d5 100644
--- a/executor/nocover.h
+++ b/executor/nocover.h
@@ -29,6 +29,11 @@ static void cover_unprotect(cover_t* cov)
{
}
+static bool is_kernel_data(uint64 addr)
+{
+ return false;
+}
+
static bool use_cover_edges(uint64 pc)
{
return true;
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index 8ccfb3ef6..d9af938dd 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -4,7 +4,10 @@
package runtest
import (
+ "bytes"
"context"
+ "encoding/binary"
+ "encoding/hex"
"errors"
"flag"
"fmt"
@@ -15,6 +18,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/csource"
+ "github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
"github.com/google/syzkaller/pkg/ipc"
"github.com/google/syzkaller/pkg/osutil"
@@ -22,6 +26,7 @@ import (
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/targets"
_ "github.com/google/syzkaller/sys/test/gen" // pull in the test target
+ "github.com/stretchr/testify/assert"
)
// Can be used as:
@@ -104,6 +109,250 @@ func test(t *testing.T, sysTarget *targets.Target) {
}
}
+func TestCover(t *testing.T) {
+ // End-to-end test for coverage/signal/comparisons collection.
+ // We inject given blobs into KCOV buffer using syz_inject_cover,
+ // and then test what we get back.
+ t.Parallel()
+ for _, arch := range []string{targets.TestArch32, targets.TestArch64} {
+ sysTarget := targets.Get(targets.TestOS, arch)
+ t.Run(arch, func(t *testing.T) {
+ if sysTarget.BrokenCompiler != "" {
+ t.Skipf("skipping due to broken compiler:\n%v", sysTarget.BrokenCompiler)
+ }
+ target, err := prog.GetTarget(targets.TestOS, arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Parallel()
+ testCover(t, target)
+ })
+ }
+}
+
+type CoverTest struct {
+ Is64Bit int
+ Input []byte
+ Flags flatrpc.ExecFlag
+ Cover []uint64
+ Signal []uint64
+ Comps [][2]uint64
+}
+
+type Comparison struct {
+ Type uint64
+ Arg1 uint64
+ Arg2 uint64
+ PC uint64
+}
+
+const (
+ CmpConst = 1
+ CmpSize1 = 0
+ CmpSize2 = 2
+ CmpSize4 = 4
+ CmpSize8 = 6
+)
+
+func testCover(t *testing.T, target *prog.Target) {
+ tests := []CoverTest{
+ // Empty coverage.
+ {
+ Is64Bit: 1,
+ Input: makeCover64(),
+ Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
+ },
+ {
+ Is64Bit: 0,
+ Input: makeCover64(),
+ Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
+ },
+ // Single 64-bit PC.
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0112233),
+ Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
+ Cover: []uint64{0xc0dec0dec0112233},
+ Signal: []uint64{0xc0dec0dec0112233},
+ },
+ // Single 32-bit PC.
+ {
+ Is64Bit: 0,
+ Input: makeCover32(0xc0112233),
+ Flags: flatrpc.ExecFlagCollectSignal | flatrpc.ExecFlagCollectCover,
+ Cover: []uint64{0xc0112233},
+ Signal: []uint64{0xc0112233},
+ },
+	// Ensure we don't send cover/signal when not requested.
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0112233),
+ Flags: flatrpc.ExecFlagCollectCover,
+ Cover: []uint64{0xc0dec0dec0112233},
+ },
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0112233),
+ Flags: flatrpc.ExecFlagCollectSignal,
+ Signal: []uint64{0xc0dec0dec0112233},
+ },
+ // Coverage deduplication.
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
+ Flags: flatrpc.ExecFlagCollectCover,
+ Cover: []uint64{0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011},
+ },
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0000033, 0xc0dec0dec0000022, 0xc0dec0dec0000011,
+ 0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033, 0xc0dec0dec0000011),
+ Flags: flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagDedupCover,
+ Cover: []uint64{0xc0dec0dec0000011, 0xc0dec0dec0000022, 0xc0dec0dec0000033},
+ },
+ // Signal hashing.
+ {
+ Is64Bit: 1,
+ Input: makeCover64(0xc0dec0dec0011001, 0xc0dec0dec0022002, 0xc0dec0dec00330f0,
+ 0xc0dec0dec0044b00, 0xc0dec0dec0011001, 0xc0dec0dec0022002),
+ Flags: flatrpc.ExecFlagCollectSignal,
+ Signal: []uint64{0xc0dec0dec0011001, 0xc0dec0dec0022003, 0xc0dec0dec00330f2,
+ 0xc0dec0dec0044bf0, 0xc0dec0dec0011b01},
+ },
+ // 64-bit comparisons.
+ {
+ Is64Bit: 1,
+ Input: makeComps(
+ // A normal 8-byte comparison must be returned in the output as is.
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
+ // Duplicate must be removed.
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
+ // Non-const comparisons must be duplicated both ways.
+ Comparison{CmpSize8, 0x30, 0x31, 0},
+ // Test sign-extension for smaller argument types.
+ Comparison{CmpSize1 | CmpConst, 0xa3, 0x77, 0},
+ Comparison{CmpSize1 | CmpConst, 0xff10, 0xffe1, 0},
+ Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
+ Comparison{CmpSize4 | CmpConst, 0xabcd1234, 0x4321, 0},
+ // Comparison with const 0 must be removed.
+ Comparison{CmpSize8 | CmpConst, 0, 0x2222222222222222, 0},
+ Comparison{CmpSize8, 0, 0x3333, 0},
+ // Comparison of equal values must be removed.
+ Comparison{CmpSize8, 0, 0, 0},
+ Comparison{CmpSize8, 0x1111, 0x1111, 0},
+ // Comparisons of kernel addresses must be removed.
+ Comparison{CmpSize8 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
+ Comparison{CmpSize8, 0xda1a0000, 0, 0},
+ Comparison{CmpSize8, 0, 0xda1a0010, 0},
+ Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
+ // But not with something that's not a kernel address.
+ Comparison{CmpSize8 | CmpConst, 0xda1a0010, 0xabcd, 0},
+ ),
+ Flags: flatrpc.ExecFlagCollectComps,
+ Comps: [][2]uint64{
+ {0x2222222222222222, 0x1111111111111111},
+ {0x30, 0x31},
+ {0x31, 0x30},
+ {0x77, 0xffffffa3},
+ {0xffffffe1, 0x10},
+ {0x4321, 0xffffabcd},
+ {0x4321, 0xabcd1234},
+ {0x3333, 0},
+ {0, 0x3333},
+ {0xc0dec0dec0de1000, 0xc0dec0dec0de0000},
+ {0xabcd, 0xda1a0010},
+ },
+ },
+ // 32-bit comparisons must be the same, so test only a subset.
+ {
+ Is64Bit: 0,
+ Input: makeComps(
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
+ Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
+ Comparison{CmpSize4 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
+ Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
+ Comparison{CmpSize4 | CmpConst, 0xc0de0000, 0xc0de1000, 0},
+ Comparison{CmpSize8 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
+ ),
+ Flags: flatrpc.ExecFlagCollectComps,
+ Comps: [][2]uint64{
+ {0x2222222222222222, 0x1111111111111111},
+ {0x4321, 0xffffabcd},
+ {0xc0dec0dec0de1000, 0xc0dec0dec0de0000},
+ {0xc0de1000, 0xc0de0000},
+ {0xc0de1022, 0xc0de0011},
+ },
+ },
+ // TODO: test max signal filtering and cover filter when syz-executor handles them.
+ }
+ executor := csource.BuildExecutor(t, target, "../../")
+ for i, test := range tests {
+ test := test
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ t.Parallel()
+ testCover1(t, target, executor, test)
+ })
+ }
+}
+
+func testCover1(t *testing.T, target *prog.Target, executor string, test CoverTest) {
+ text := fmt.Sprintf(`syz_inject_cover(0x%v, &AUTO="%s", AUTO)`, test.Is64Bit, hex.EncodeToString(test.Input))
+ p, err := target.Deserialize([]byte(text), prog.Strict)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := &queue.Request{
+ Prog: p,
+ Repeat: 1,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSignal,
+ ExecFlags: test.Flags,
+ },
+ }
+ res := runTest(req, executor)
+ if res.Info == nil || len(res.Info.Calls) != 1 || res.Info.Calls[0] == nil {
+ t.Fatalf("program execution failed: %v\n%s", res.Err, res.Output)
+ }
+ call := res.Info.Calls[0]
+ var comps [][2]uint64
+ for _, cmp := range call.Comps {
+ comps = append(comps, [2]uint64{cmp.Op1, cmp.Op2})
+ }
+ assert.Equal(t, test.Cover, call.Cover)
+ assert.Equal(t, test.Signal, call.Signal)
+ // Comparisons are reordered and order does not matter, so compare without order.
+ assert.ElementsMatch(t, test.Comps, comps)
+}
+
+func makeCover64(pcs ...uint64) []byte {
+ w := new(bytes.Buffer)
+ binary.Write(w, binary.NativeEndian, uint64(len(pcs)))
+ for _, pc := range pcs {
+ binary.Write(w, binary.NativeEndian, pc)
+ }
+ return w.Bytes()
+}
+
+func makeCover32(pcs ...uint32) []byte {
+ w := new(bytes.Buffer)
+ binary.Write(w, binary.NativeEndian, uint32(len(pcs)))
+ for _, pc := range pcs {
+ binary.Write(w, binary.NativeEndian, pc)
+ }
+ return w.Bytes()
+}
+
+func makeComps(comps ...Comparison) []byte {
+ w := new(bytes.Buffer)
+ binary.Write(w, binary.NativeEndian, uint64(len(comps)))
+ for _, cmp := range comps {
+ binary.Write(w, binary.NativeEndian, cmp)
+ }
+ return w.Bytes()
+}
+
func runTest(req *queue.Request, executor string) *queue.Result {
cfg := new(ipc.Config)
sysTarget := targets.Get(req.Prog.Target.OS, req.Prog.Target.Arch)
diff --git a/sys/targets/targets.go b/sys/targets/targets.go
index 4e3e37e66..6cf5d8b9e 100644
--- a/sys/targets/targets.go
+++ b/sys/targets/targets.go
@@ -225,7 +225,7 @@ var List = map[string]map[string]*Target{
PtrSize: 4,
PageSize: 8 << 10,
Int64Alignment: 4,
- CFlags: []string{"-static"},
+ CFlags: []string{"-m32", "-static"},
osCommon: osCommon{
SyscallNumbers: true,
Int64SyscallArgs: true,
@@ -236,7 +236,7 @@ var List = map[string]map[string]*Target{
TestArch32Fork: {
PtrSize: 4,
PageSize: 4 << 10,
- CFlags: []string{"-static-pie"},
+ CFlags: []string{"-m32", "-static-pie"},
osCommon: osCommon{
SyscallNumbers: true,
Int64SyscallArgs: true,
diff --git a/sys/test/exec.txt b/sys/test/exec.txt
index 67c0c70c0..4b43b57b0 100644
--- a/sys/test/exec.txt
+++ b/sys/test/exec.txt
@@ -11,6 +11,9 @@ syz_compare_int$3(n const[3], v0 intptr, v1 intptr, v2 intptr)
syz_compare_int$4(n const[4], v0 intptr, v1 intptr, v2 intptr, v3 intptr)
syz_compare_zlib(data ptr[in, array[int8]], size bytesize[data], zdata ptr[in, compressed_image], zsize bytesize[zdata]) (no_generate, no_minimize)
+# Copies the data into KCOV buffer verbatim and sets assumed kernel bitness.
+syz_inject_cover(is64 bool8, ptr ptr[in, array[int8]], size bytesize[ptr])
+
compare_data [
align0 align0
bf0 syz_bf_struct0