aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--executor/executor.cc26
-rw-r--r--pkg/flatrpc/flatrpc.fbs4
-rw-r--r--pkg/flatrpc/flatrpc.go43
-rw-r--r--pkg/flatrpc/flatrpc.h39
-rw-r--r--pkg/fuzzer/fuzzer.go9
-rw-r--r--pkg/fuzzer/job.go4
-rw-r--r--pkg/rpcserver/runner.go9
-rw-r--r--pkg/runtest/run_test.go80
-rw-r--r--prog/hints.go71
-rw-r--r--prog/hints_test.go82
-rw-r--r--prog/rand_test.go4
-rw-r--r--tools/syz-execprog/execprog.go2
-rw-r--r--tools/syz-mutate/mutate.go2
13 files changed, 271 insertions, 104 deletions
diff --git a/executor/executor.cc b/executor/executor.cc
index bcd75f612..5855ef9b4 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -462,7 +462,7 @@ static void copyin(char* addr, uint64 val, uint64 size, uint64 bf, uint64 bf_off
static bool copyout(char* addr, uint64 size, uint64* res);
static void setup_control_pipes();
static bool coverage_filter(uint64 pc);
-static std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp);
+static rpc::ComparisonRaw convert(const kcov_comparison_t& cmp);
static flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64 req_id,
uint64 elapsed, uint64 freshness, uint32 status, const std::vector<uint8_t>* process_output);
@@ -1102,29 +1102,23 @@ uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
cover_unprotect(cov);
rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
rpc::ComparisonRaw* end = start;
- // We will convert kcov_comparison_t to ComparisonRaw inplace
- // and potentially double number of elements, so ensure we have space.
- static_assert(sizeof(kcov_comparison_t) >= 2 * sizeof(rpc::ComparisonRaw));
+ // We will convert kcov_comparison_t to ComparisonRaw inplace.
+ static_assert(sizeof(kcov_comparison_t) >= sizeof(rpc::ComparisonRaw));
for (uint32 i = 0; i < ncomps; i++) {
- auto [raw, swap, ok] = convert(cov_start[i]);
- if (!ok)
+ auto raw = convert(cov_start[i]);
+ if (!raw.pc())
continue;
*end++ = raw;
- // Compiler marks comparisons with a const with KCOV_CMP_CONST flag.
- // If the flag is set, then we need to export only one order of operands
- // (because only one of them could potentially come from the input).
- // If the flag is not set, then we export both orders as both operands
- // could come from the input.
- if (swap)
- *end++ = {raw.op2(), raw.op1()};
}
std::sort(start, end, [](rpc::ComparisonRaw a, rpc::ComparisonRaw b) -> bool {
+ if (a.pc() != b.pc())
+ return a.pc() < b.pc();
if (a.op1() != b.op1())
return a.op1() < b.op1();
return a.op2() < b.op2();
});
ncomps = std::unique(start, end, [](rpc::ComparisonRaw a, rpc::ComparisonRaw b) -> bool {
- return a.op1() == b.op1() && a.op2() == b.op2();
+ return a.pc() == b.pc() && a.op1() == b.op1() && a.op2() == b.op2();
}) -
start;
cover_protect(cov);
@@ -1646,7 +1640,7 @@ uint64 read_input(uint8** input_posp, bool peek)
return v;
}
-std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp)
+rpc::ComparisonRaw convert(const kcov_comparison_t& cmp)
{
if (cmp.type > (KCOV_CMP_CONST | KCOV_CMP_SIZE_MASK))
failmsg("invalid kcov comp type", "type=%llx", cmp.type);
@@ -1692,7 +1686,7 @@ std::tuple<rpc::ComparisonRaw, bool, bool> convert(const kcov_comparison_t& cmp)
// Prog package expects operands in the opposite order (first operand may come from the input,
// the second operand was computed in the kernel), so swap operands.
- return {{arg2, arg1}, !(cmp.type & KCOV_CMP_CONST), true};
+ return {cmp.pc, arg2, arg1, !!(cmp.type & KCOV_CMP_CONST)};
}
void failmsg(const char* err, const char* msg, ...)
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 121b289e9..98fb2f5da 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -208,8 +208,12 @@ table CallInfoRaw {
}
struct ComparisonRaw {
+ pc :uint64;
op1 :uint64;
op2 :uint64;
+ // If is_const is set, op2 was a source code const (could not come from the input),
+ // otherwise both operands were dynamic and could come from the input.
+ is_const :bool;
}
table ProgInfoRaw {
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index 79c0d6cf2..aa8970ba9 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -2594,7 +2594,7 @@ func (rcv *CallInfoRaw) Comps(obj *ComparisonRaw, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Vector(o)
- x += flatbuffers.UOffsetT(j) * 16
+ x += flatbuffers.UOffsetT(j) * 32
obj.Init(rcv._tab.Bytes, x)
return true
}
@@ -2634,26 +2634,30 @@ func CallInfoRawAddComps(builder *flatbuffers.Builder, comps flatbuffers.UOffset
builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(comps), 0)
}
func CallInfoRawStartCompsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(16, numElems, 8)
+ return builder.StartVector(32, numElems, 8)
}
func CallInfoRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
type ComparisonRawT struct {
- Op1 uint64 `json:"op1"`
- Op2 uint64 `json:"op2"`
+ Pc uint64 `json:"pc"`
+ Op1 uint64 `json:"op1"`
+ Op2 uint64 `json:"op2"`
+ IsConst bool `json:"is_const"`
}
func (t *ComparisonRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
if t == nil {
return 0
}
- return CreateComparisonRaw(builder, t.Op1, t.Op2)
+ return CreateComparisonRaw(builder, t.Pc, t.Op1, t.Op2, t.IsConst)
}
func (rcv *ComparisonRaw) UnPackTo(t *ComparisonRawT) {
+ t.Pc = rcv.Pc()
t.Op1 = rcv.Op1()
t.Op2 = rcv.Op2()
+ t.IsConst = rcv.IsConst()
}
func (rcv *ComparisonRaw) UnPack() *ComparisonRawT {
@@ -2678,24 +2682,41 @@ func (rcv *ComparisonRaw) Table() flatbuffers.Table {
return rcv._tab.Table
}
-func (rcv *ComparisonRaw) Op1() uint64 {
+func (rcv *ComparisonRaw) Pc() uint64 {
return rcv._tab.GetUint64(rcv._tab.Pos + flatbuffers.UOffsetT(0))
}
-func (rcv *ComparisonRaw) MutateOp1(n uint64) bool {
+func (rcv *ComparisonRaw) MutatePc(n uint64) bool {
return rcv._tab.MutateUint64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
}
-func (rcv *ComparisonRaw) Op2() uint64 {
+func (rcv *ComparisonRaw) Op1() uint64 {
return rcv._tab.GetUint64(rcv._tab.Pos + flatbuffers.UOffsetT(8))
}
-func (rcv *ComparisonRaw) MutateOp2(n uint64) bool {
+func (rcv *ComparisonRaw) MutateOp1(n uint64) bool {
return rcv._tab.MutateUint64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
}
-func CreateComparisonRaw(builder *flatbuffers.Builder, op1 uint64, op2 uint64) flatbuffers.UOffsetT {
- builder.Prep(8, 16)
+func (rcv *ComparisonRaw) Op2() uint64 {
+ return rcv._tab.GetUint64(rcv._tab.Pos + flatbuffers.UOffsetT(16))
+}
+func (rcv *ComparisonRaw) MutateOp2(n uint64) bool {
+ return rcv._tab.MutateUint64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n)
+}
+
+func (rcv *ComparisonRaw) IsConst() bool {
+ return rcv._tab.GetBool(rcv._tab.Pos + flatbuffers.UOffsetT(24))
+}
+func (rcv *ComparisonRaw) MutateIsConst(n bool) bool {
+ return rcv._tab.MutateBool(rcv._tab.Pos+flatbuffers.UOffsetT(24), n)
+}
+
+func CreateComparisonRaw(builder *flatbuffers.Builder, pc uint64, op1 uint64, op2 uint64, isConst bool) flatbuffers.UOffsetT {
+ builder.Prep(8, 32)
+ builder.Pad(7)
+ builder.PrependBool(isConst)
builder.PrependUint64(op2)
builder.PrependUint64(op1)
+ builder.PrependUint64(pc)
return builder.Offset()
}
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index 7ce247d2e..8be575885 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -675,17 +675,39 @@ FLATBUFFERS_STRUCT_END(ExecOptsRaw, 24);
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) ComparisonRaw FLATBUFFERS_FINAL_CLASS {
private:
+ uint64_t pc_;
uint64_t op1_;
uint64_t op2_;
+ uint8_t is_const_;
+ int8_t padding0__; int16_t padding1__; int32_t padding2__;
public:
ComparisonRaw()
- : op1_(0),
- op2_(0) {
- }
- ComparisonRaw(uint64_t _op1, uint64_t _op2)
- : op1_(flatbuffers::EndianScalar(_op1)),
- op2_(flatbuffers::EndianScalar(_op2)) {
+ : pc_(0),
+ op1_(0),
+ op2_(0),
+ is_const_(0),
+ padding0__(0),
+ padding1__(0),
+ padding2__(0) {
+ (void)padding0__;
+ (void)padding1__;
+ (void)padding2__;
+ }
+ ComparisonRaw(uint64_t _pc, uint64_t _op1, uint64_t _op2, bool _is_const)
+ : pc_(flatbuffers::EndianScalar(_pc)),
+ op1_(flatbuffers::EndianScalar(_op1)),
+ op2_(flatbuffers::EndianScalar(_op2)),
+ is_const_(flatbuffers::EndianScalar(static_cast<uint8_t>(_is_const))),
+ padding0__(0),
+ padding1__(0),
+ padding2__(0) {
+ (void)padding0__;
+ (void)padding1__;
+ (void)padding2__;
+ }
+ uint64_t pc() const {
+ return flatbuffers::EndianScalar(pc_);
}
uint64_t op1() const {
return flatbuffers::EndianScalar(op1_);
@@ -693,8 +715,11 @@ FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) ComparisonRaw FLATBUFFERS_FINAL_CLASS {
uint64_t op2() const {
return flatbuffers::EndianScalar(op2_);
}
+ bool is_const() const {
+ return flatbuffers::EndianScalar(is_const_) != 0;
+ }
};
-FLATBUFFERS_STRUCT_END(ComparisonRaw, 16);
+FLATBUFFERS_STRUCT_END(ComparisonRaw, 32);
struct ConnectRequestRawT : public flatbuffers::NativeTable {
typedef ConnectRequestRaw TableType;
diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go
index a2b2ef475..7ac8cba3e 100644
--- a/pkg/fuzzer/fuzzer.go
+++ b/pkg/fuzzer/fuzzer.go
@@ -24,10 +24,11 @@ type Fuzzer struct {
Config *Config
Cover *Cover
- ctx context.Context
- mu sync.Mutex
- rnd *rand.Rand
- target *prog.Target
+ ctx context.Context
+ mu sync.Mutex
+ rnd *rand.Rand
+ target *prog.Target
+ hintsLimiter prog.HintsLimiter
ct *prog.ChoiceTable
ctProgs int
diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go
index 93d1cc354..99ff3c433 100644
--- a/pkg/fuzzer/job.go
+++ b/pkg/fuzzer/job.go
@@ -456,7 +456,7 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
}
got := make(prog.CompMap)
for _, cmp := range result.Info.Calls[job.call].Comps {
- got.AddComp(cmp.Op1, cmp.Op2)
+ got.Add(cmp.Pc, cmp.Op1, cmp.Op2, cmp.IsConst)
}
if i == 0 {
comps = got
@@ -465,6 +465,8 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
}
}
+ fuzzer.hintsLimiter.Limit(comps)
+
// Then mutate the initial program for every match between
// a syscall argument and a comparison operand.
// Execute each of such mutants to check if it gives new coverage.
diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go
index 691a5b5d5..21b270421 100644
--- a/pkg/rpcserver/runner.go
+++ b/pkg/rpcserver/runner.go
@@ -420,6 +420,15 @@ func (runner *Runner) convertCallInfo(call *flatrpc.CallInfo) {
call.Cover = runner.canonicalizer.Canonicalize(call.Cover)
call.Signal = runner.canonicalizer.Canonicalize(call.Signal)
+ call.Comps = slices.DeleteFunc(call.Comps, func(cmp *flatrpc.Comparison) bool {
+ converted := runner.canonicalizer.Canonicalize([]uint64{cmp.Pc})
+ if len(converted) == 0 {
+ return true
+ }
+ cmp.Pc = converted[0]
+ return false
+ })
+
// Check signal belongs to kernel addresses.
// Mismatching addresses can mean either corrupted VM memory, or that the fuzzer somehow
// managed to inject output signal. If we see any bogus signal, drop whole signal
diff --git a/pkg/runtest/run_test.go b/pkg/runtest/run_test.go
index 1458a1c9a..3da87dd6c 100644
--- a/pkg/runtest/run_test.go
+++ b/pkg/runtest/run_test.go
@@ -151,7 +151,7 @@ type CoverTest struct {
Flags flatrpc.ExecFlag
Cover []uint64
Signal []uint64
- Comps [][2]uint64
+ Comps []*flatrpc.Comparison
}
type Comparison struct {
@@ -252,60 +252,58 @@ func testCover(t *testing.T, target *prog.Target) {
Is64Bit: true,
Input: makeComps(
// A normal 8-byte comparison must be returned in the output as is.
- Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 1},
// Duplicate must be removed.
- Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 1},
// Non-const comparisons are exported once; prog.CompMap reconstructs both operand orders from is_const.
- Comparison{CmpSize8, 0x30, 0x31, 0},
+ Comparison{CmpSize8, 0x30, 0x31, 1},
// Test sign-extension for smaller argument types.
- Comparison{CmpSize1 | CmpConst, 0xa3, 0x77, 0},
- Comparison{CmpSize1 | CmpConst, 0xff10, 0xffe1, 0},
- Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
- Comparison{CmpSize4 | CmpConst, 0xabcd1234, 0x4321, 0},
+ Comparison{CmpSize1 | CmpConst, 0xa3, 0x77, 1},
+ Comparison{CmpSize1 | CmpConst, 0xff10, 0xffe1, 1},
+ Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 1},
+ Comparison{CmpSize4 | CmpConst, 0xabcd1234, 0x4321, 1},
// Comparison with const 0 must be removed.
- Comparison{CmpSize8 | CmpConst, 0, 0x2222222222222222, 0},
- Comparison{CmpSize8, 0, 0x3333, 0},
+ Comparison{CmpSize8 | CmpConst, 0, 0x2222222222222222, 1},
+ Comparison{CmpSize8, 0, 0x3333, 1},
// Comparison of equal values must be removed.
- Comparison{CmpSize8, 0, 0, 0},
- Comparison{CmpSize8, 0x1111, 0x1111, 0},
+ Comparison{CmpSize8, 0, 0, 1},
+ Comparison{CmpSize8, 0x1111, 0x1111, 1},
// Comparisons of kernel addresses must be removed.
- Comparison{CmpSize8 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
- Comparison{CmpSize8, 0xda1a0000, 0, 0},
- Comparison{CmpSize8, 0, 0xda1a0010, 0},
- Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
+ Comparison{CmpSize8 | CmpConst, 0xda1a0000, 0xda1a1000, 1},
+ Comparison{CmpSize8, 0xda1a0000, 0, 1},
+ Comparison{CmpSize8, 0, 0xda1a0010, 1},
+ Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 1},
// But not with something that's not a kernel address.
- Comparison{CmpSize8 | CmpConst, 0xda1a0010, 0xabcd, 0},
+ Comparison{CmpSize8 | CmpConst, 0xda1a0010, 0xabcd, 1},
),
Flags: flatrpc.ExecFlagCollectComps,
- Comps: [][2]uint64{
- {0x2222222222222222, 0x1111111111111111},
- {0x30, 0x31},
- {0x31, 0x30},
- {0x77, 0xffffffffffffffa3},
- {0xffffffffffffffe1, 0x10},
- {0x4321, 0xffffffffffffabcd},
- {0x4321, 0xffffffffabcd1234},
- {0x3333, 0},
- {0, 0x3333},
- {0xabcd, 0xda1a0010},
+ Comps: []*flatrpc.Comparison{
+ {Pc: 1, Op1: 0x2222222222222222, Op2: 0x1111111111111111, IsConst: true},
+ {Pc: 1, Op1: 0x31, Op2: 0x30, IsConst: false},
+ {Pc: 1, Op1: 0x77, Op2: 0xffffffffffffffa3, IsConst: true},
+ {Pc: 1, Op1: 0xffffffffffffffe1, Op2: 0x10, IsConst: true},
+ {Pc: 1, Op1: 0x4321, Op2: 0xffffffffffffabcd, IsConst: true},
+ {Pc: 1, Op1: 0x4321, Op2: 0xffffffffabcd1234, IsConst: true},
+ {Pc: 1, Op1: 0x3333, Op2: 0, IsConst: false},
+ {Pc: 1, Op1: 0xabcd, Op2: 0xda1a0010, IsConst: true},
},
},
// 32-bit comparisons must be the same, so test only a subset.
{
Is64Bit: false,
Input: makeComps(
- Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 0},
- Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 0},
- Comparison{CmpSize4 | CmpConst, 0xda1a0000, 0xda1a1000, 0},
- Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 0},
- Comparison{CmpSize4 | CmpConst, 0xc0de0000, 0xc0de1000, 0},
- Comparison{CmpSize4 | CmpConst, 0xc0de0011, 0xc0de1022, 0},
+ Comparison{CmpSize8 | CmpConst, 0x1111111111111111, 0x2222222222222222, 1},
+ Comparison{CmpSize2 | CmpConst, 0xabcd, 0x4321, 2},
+ Comparison{CmpSize4 | CmpConst, 0xda1a0000, 0xda1a1000, 1},
+ Comparison{CmpSize8 | CmpConst, 0xc0dec0dec0de0000, 0xc0dec0dec0de1000, 3},
+ Comparison{CmpSize4 | CmpConst, 0xc0de0000, 0xc0de1000, 1},
+ Comparison{CmpSize4 | CmpConst, 0xc0de0011, 0xc0de1022, 1},
),
Flags: flatrpc.ExecFlagCollectComps,
- Comps: [][2]uint64{
- {0x2222222222222222, 0x1111111111111111},
- {0x4321, 0xffffffffffffabcd},
- {0xc0dec0dec0de1000, 0xc0dec0dec0de0000},
+ Comps: []*flatrpc.Comparison{
+ {Pc: 1, Op1: 0x2222222222222222, Op2: 0x1111111111111111, IsConst: true},
+ {Pc: 2, Op1: 0x4321, Op2: 0xffffffffffffabcd, IsConst: true},
+ {Pc: 3, Op1: 0xc0dec0dec0de1000, Op2: 0xc0dec0dec0de0000, IsConst: true},
},
},
// Test max signal.
@@ -405,10 +403,6 @@ func testCover1(t *testing.T, ctx context.Context, target *prog.Target, test Cov
t.Fatalf("program execution failed: status=%v err=%v\n%s", res.Status, res.Err, res.Output)
}
call := res.Info.Calls[0]
- var comps [][2]uint64
- for _, cmp := range call.Comps {
- comps = append(comps, [2]uint64{cmp.Op1, cmp.Op2})
- }
if test.Cover == nil {
test.Cover = []uint64{}
}
@@ -418,7 +412,7 @@ func testCover1(t *testing.T, ctx context.Context, target *prog.Target, test Cov
assert.Equal(t, test.Cover, call.Cover)
assert.Equal(t, test.Signal, call.Signal)
// Comparisons are reordered and order does not matter, so compare without order.
- assert.ElementsMatch(t, test.Comps, comps)
+ assert.ElementsMatch(t, test.Comps, call.Comps)
}
func makeCover64(pcs ...uint64) []byte {
diff --git a/prog/hints.go b/prog/hints.go
index ce83c009f..c054b9852 100644
--- a/prog/hints.go
+++ b/prog/hints.go
@@ -23,18 +23,13 @@ import (
"encoding/binary"
"fmt"
"sort"
+ "sync"
"github.com/google/syzkaller/pkg/image"
)
-// Example: for comparisons {(op1, op2), (op1, op3), (op1, op4), (op2, op1)}
-// this map will store the following:
-//
-// m = {
-// op1: {map[op2]: true, map[op3]: true, map[op4]: true},
-// op2: {map[op1]: true}
-// }.
-type CompMap map[uint64]map[uint64]bool
+// CompMap maps an operand that could come from the input -> the second operand -> the set of PCs where the comparison was observed.
+type CompMap map[uint64]map[uint64]map[uint64]bool
const (
maxDataLength = 100
@@ -42,11 +37,18 @@ const (
var specialIntsSet map[uint64]bool
-func (m CompMap) AddComp(arg1, arg2 uint64) {
+func (m CompMap) Add(pc, arg1, arg2 uint64, isConst bool) {
if _, ok := m[arg1]; !ok {
- m[arg1] = make(map[uint64]bool)
+ m[arg1] = make(map[uint64]map[uint64]bool)
+ }
+ if _, ok := m[arg1][arg2]; !ok {
+ m[arg1][arg2] = make(map[uint64]bool)
+ }
+ m[arg1][arg2][pc] = true
+ if !isConst {
+ // Both operands could come from the input.
+ m.Add(pc, arg2, arg1, true)
}
- m[arg1][arg2] = true
}
func (m CompMap) String() string {
@@ -66,8 +68,13 @@ func (m CompMap) String() string {
// InplaceIntersect() only leaves the value pairs that are also present in other.
func (m CompMap) InplaceIntersect(other CompMap) {
for val1, nested := range m {
- for val2 := range nested {
- if !other[val1][val2] {
+ for val2, pcs := range nested {
+ for pc := range pcs {
+ if !other[val1][val2][pc] {
+ delete(pcs, pc)
+ }
+ }
+ if len(pcs) == 0 {
delete(nested, val2)
}
}
@@ -373,6 +380,44 @@ func shrinkExpand(v uint64, compMap CompMap, bitsize uint64, image bool) []uint6
return res
}
+type HintsLimiter struct {
+ mu sync.Mutex
+ attempts map[uint64]int // replacement attempts per PC
+}
+
+// Limit restricts hints to at most N replacement attempts per single kernel PC
+// (globally, across all hints mutations for all programs).
+// We are getting too many generated candidates, and the fuzzer may not keep up
+// with them at all (hints jobs keep growing infinitely). If a hint indeed came
+// from the input w/o transformation, then we should guess it on the first
+// attempt (or at least after few attempts). If it did not come from the input,
+// or came with a non-trivial transformation, then any number of attempts won't
+// help. So limit the total number of attempts (until the next restart).
+func (limiter *HintsLimiter) Limit(comps CompMap) {
+ const N = 10
+ limiter.mu.Lock()
+ defer limiter.mu.Unlock()
+ if limiter.attempts == nil {
+ limiter.attempts = make(map[uint64]int)
+ }
+ for op1, ops2 := range comps {
+ for op2, pcs := range ops2 {
+ for pc := range pcs {
+ limiter.attempts[pc]++
+ if limiter.attempts[pc] > N {
+ delete(pcs, pc)
+ }
+ }
+ if len(pcs) == 0 {
+ delete(ops2, op2)
+ }
+ }
+ if len(ops2) == 0 {
+ delete(comps, op1)
+ }
+ }
+}
+
func init() {
specialIntsSet = make(map[uint64]bool)
for _, v := range specialInts {
diff --git a/prog/hints_test.go b/prog/hints_test.go
index d5a5a1461..44c24bfb8 100644
--- a/prog/hints_test.go
+++ b/prog/hints_test.go
@@ -662,7 +662,7 @@ func TestHintsRandom(t *testing.T) {
}
comps := make(CompMap)
for v := range vals {
- comps.AddComp(v, r.randInt64())
+ comps.Add(1, v, r.randInt64(), true)
}
p.MutateWithHints(i, comps, func(p1 *Prog) bool { return true })
}
@@ -772,7 +772,7 @@ func BenchmarkHints(b *testing.B) {
}
comps[i] = make(CompMap)
for v := range vals {
- comps[i].AddComp(v, r.randInt64())
+ comps[i].Add(1, v, r.randInt64(), true)
}
}
b.RunParallel(func(pb *testing.PB) {
@@ -784,10 +784,82 @@ func BenchmarkHints(b *testing.B) {
})
}
-func compSet(vals ...uint64) map[uint64]bool {
- m := make(map[uint64]bool)
+func TestHintsLimiter(t *testing.T) {
+ var limiter HintsLimiter
+
+ // Base case.
+ comps := make(CompMap)
+ comps.Add(1000, 1000, 1100, true)
+ for i := uint64(0); i < 9; i++ {
+ comps.Add(2000, 2000+i, 2100+i, true)
+ }
+ for i := uint64(0); i < 10; i++ {
+ comps.Add(3000, 3000+i, 3100+i, true)
+ }
+ for i := uint64(0); i < 11; i++ {
+ comps.Add(4000, 4000+i, 4100+i, true)
+ }
+ for i := uint64(0); i < 20; i++ {
+ comps.Add(5000, 5000+i, 5100+i, true)
+ }
+ assert.Equal(t, perPCCount(comps), map[uint64]int{
+ 1000: 1,
+ 2000: 9,
+ 3000: 10,
+ 4000: 11,
+ 5000: 20,
+ })
+ limiter.Limit(comps)
+ assert.Equal(t, perPCCount(comps), map[uint64]int{
+ 1000: 1,
+ 2000: 9,
+ 3000: 10,
+ 4000: 10,
+ 5000: 10,
+ })
+
+ // Test that counts are accumulated in the limiter.
+ comps = make(CompMap)
+ for i := uint64(0); i < 3; i++ {
+ comps.Add(1000, 1000+i, 1100+i, true)
+ }
+ for i := uint64(0); i < 3; i++ {
+ comps.Add(2000, 2000+i, 2100+i, true)
+ }
+ for i := uint64(0); i < 3; i++ {
+ comps.Add(3000, 3000+i, 3100+i, true)
+ }
+ assert.Equal(t, perPCCount(comps), map[uint64]int{
+ 1000: 3,
+ 2000: 3,
+ 3000: 3,
+ })
+ limiter.Limit(comps)
+ assert.Equal(t, perPCCount(comps), map[uint64]int{
+ 1000: 3,
+ 2000: 1,
+ })
+}
+
+func perPCCount(comps CompMap) map[uint64]int {
+ res := make(map[uint64]int)
+ for _, ops2 := range comps {
+ for _, pcs := range ops2 {
+ for pc := range pcs {
+ res[pc]++
+ }
+ }
+ }
+ return res
+}
+
+func compSet(vals ...uint64) map[uint64]map[uint64]bool {
+ m := make(map[uint64]map[uint64]bool)
for _, v := range vals {
- m[v] = true
+ if m[v] == nil {
+ m[v] = make(map[uint64]bool)
+ }
+ m[v][1] = true
}
return m
}
diff --git a/prog/rand_test.go b/prog/rand_test.go
index 53b8ea338..d1e963595 100644
--- a/prog/rand_test.go
+++ b/prog/rand_test.go
@@ -60,8 +60,8 @@ func generateProg(t *testing.T, target *Target, rs rand.Source, ct *ChoiceTable,
for i, c := range p.Calls {
comps := make(CompMap)
for v := range extractValues(c) {
- comps.AddComp(v, v+1)
- comps.AddComp(v, v+10)
+ comps.Add(1, v, v+1, true)
+ comps.Add(1, v, v+10, true)
}
// If unbounded, this code may take O(N^2) time to complete.
// Since large programs are not uncommon, let's limit the number of hint iterations.
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index 8fce0d961..e86d09053 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -288,7 +288,7 @@ func (ctx *Context) printHints(p *prog.Prog, info *flatrpc.ProgInfo) {
}
comps := make(prog.CompMap)
for _, cmp := range info.Calls[i].Comps {
- comps.AddComp(cmp.Op1, cmp.Op2)
+ comps.Add(cmp.Pc, cmp.Op1, cmp.Op2, cmp.IsConst)
if ctx.output {
fmt.Printf("comp 0x%x ? 0x%x\n", cmp.Op1, cmp.Op2)
}
diff --git a/tools/syz-mutate/mutate.go b/tools/syz-mutate/mutate.go
index 101ce0739..351d0ae6a 100644
--- a/tools/syz-mutate/mutate.go
+++ b/tools/syz-mutate/mutate.go
@@ -88,7 +88,7 @@ func main() {
}
if *flagHintCall != -1 {
comps := make(prog.CompMap)
- comps.AddComp(*flagHintSrc, *flagHintCmp)
+ comps.Add(0, *flagHintSrc, *flagHintCmp, true)
p.MutateWithHints(*flagHintCall, comps, func(p *prog.Prog) bool {
fmt.Printf("%s\n\n", p.Serialize())
return true