about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--executor/common.h14
-rw-r--r--executor/executor.cc182
-rw-r--r--pkg/csource/common.go10
-rw-r--r--pkg/csource/csource.go11
-rw-r--r--pkg/csource/csource_test.go5
-rw-r--r--pkg/csource/generated.go14
-rw-r--r--pkg/csource/options.go4
-rw-r--r--pkg/csource/options_test.go10
-rw-r--r--pkg/instance/instance_test.go11
-rw-r--r--pkg/ipc/ipc.go4
-rw-r--r--pkg/ipc/ipc_test.go2
-rw-r--r--pkg/ipc/ipcconfig/ipcconfig.go5
-rw-r--r--pkg/repro/repro.go9
-rw-r--r--pkg/repro/repro_test.go1
-rw-r--r--pkg/runtest/run.go5
-rw-r--r--prog/analysis.go29
-rw-r--r--prog/collide.go57
-rw-r--r--prog/collide_test.go84
-rw-r--r--prog/decodeexec.go4
-rw-r--r--prog/encoding.go3
-rw-r--r--prog/encoding_test.go6
-rw-r--r--prog/encodingexec.go8
-rw-r--r--prog/encodingexec_test.go16
-rw-r--r--prog/minimization.go30
-rw-r--r--prog/minimization_test.go22
-rw-r--r--prog/parse_test.go4
-rw-r--r--prog/prog.go3
-rw-r--r--syz-fuzzer/fuzzer.go2
-rw-r--r--syz-fuzzer/proc.go65
-rw-r--r--tools/syz-execprog/execprog.go16
-rw-r--r--tools/syz-prog2c/prog2c.go2
-rw-r--r--tools/syz-reprolist/reprolist.go4
32 files changed, 436 insertions, 206 deletions
diff --git a/executor/common.h b/executor/common.h
index 1e6eca5ae..95888de80 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -514,10 +514,6 @@ static void loop(void)
fprintf(stderr, "### start\n");
#endif
int i, call, thread;
-#if SYZ_COLLIDE
- int collide = 0;
-again:
-#endif
for (call = 0; call < /*{{{NUM_CALLS}}}*/; call++) {
for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0])); thread++) {
struct thread_t* th = &threads[thread];
@@ -534,8 +530,8 @@ again:
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
event_set(&th->ready);
-#if SYZ_COLLIDE
- if (collide && (call % 2) == 0)
+#if SYZ_ASYNC
+ if (/*{{{ASYNC_CONDITIONS}}}*/)
break;
#endif
event_timedwait(&th->done, /*{{{CALL_TIMEOUT_MS}}}*/);
@@ -547,12 +543,6 @@ again:
#if SYZ_HAVE_CLOSE_FDS
close_fds();
#endif
-#if SYZ_COLLIDE
- if (!collide) {
- collide = 1;
- goto again;
- }
-#endif
}
#endif
diff --git a/executor/executor.cc b/executor/executor.cc
index 3b4d93eba..01b19b81e 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -131,8 +131,9 @@ const uint64 kOutputBase = 0x1b2bc20000ull;
// the amount we might possibly need for the specific received prog.
const int kMaxOutputComparisons = 14 << 20; // executions with comparisons enabled are usually < 1% of all executions
const int kMaxOutputCoverage = 6 << 20; // coverage is needed in ~ up to 1/3 of all executions (depending on corpus rotation)
-const int kMaxOutputSignal = 4 << 20; // signal collection is always required
-const int kInitialOutput = kMaxOutputSignal; // the minimal size to be allocated in the parent process
+const int kMaxOutputSignal = 4 << 20;
+const int kMinOutput = 256 << 10; // if we don't need to send signal, the output is rather short.
+const int kInitialOutput = kMinOutput; // the minimal size to be allocated in the parent process
#else
// We don't fork and allocate the memory only once, so prepare for the worst case.
const int kInitialOutput = 14 << 20;
@@ -174,9 +175,9 @@ static bool flag_wifi;
static bool flag_delay_kcov_mmap;
static bool flag_collect_cover;
+static bool flag_collect_signal;
static bool flag_dedup_cover;
static bool flag_threaded;
-static bool flag_collide;
static bool flag_coverage_filter;
// If true, then executor should write the comparisons data to fuzzer.
@@ -212,7 +213,6 @@ const uint64 binary_format_stroct = 4;
const uint64 no_copyout = -1;
static int running;
-static bool collide;
uint32 completed;
bool is_kernel_64_bit = true;
@@ -263,7 +263,6 @@ struct thread_t {
event_t done;
uint64* copyout_pos;
uint64 copyout_index;
- bool colliding;
bool executing;
int call_index;
int call_num;
@@ -368,7 +367,7 @@ struct feature_t {
void (*setup)();
};
-static thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, call_props_t call_props);
+static thread_t* schedule_call(int call_index, int call_num, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, call_props_t call_props);
static void handle_completion(thread_t* th);
static void copyout_call_results(thread_t* th);
static void write_call_output(thread_t* th, bool finished);
@@ -653,18 +652,17 @@ void receive_execute()
syscall_timeout_ms = req.syscall_timeout_ms;
program_timeout_ms = req.program_timeout_ms;
slowdown_scale = req.slowdown_scale;
- flag_collect_cover = req.exec_flags & (1 << 0);
- flag_dedup_cover = req.exec_flags & (1 << 1);
- flag_comparisons = req.exec_flags & (1 << 2);
- flag_threaded = req.exec_flags & (1 << 3);
- flag_collide = req.exec_flags & (1 << 4);
+ flag_collect_signal = req.exec_flags & (1 << 0);
+ flag_collect_cover = req.exec_flags & (1 << 1);
+ flag_dedup_cover = req.exec_flags & (1 << 2);
+ flag_comparisons = req.exec_flags & (1 << 3);
+ flag_threaded = req.exec_flags & (1 << 4);
flag_coverage_filter = req.exec_flags & (1 << 5);
- if (!flag_threaded)
- flag_collide = false;
- debug("[%llums] exec opts: procid=%llu threaded=%d collide=%d cover=%d comps=%d dedup=%d"
+
+ debug("[%llums] exec opts: procid=%llu threaded=%d cover=%d comps=%d dedup=%d signal=%d"
" timeouts=%llu/%llu/%llu prog=%llu filter=%d\n",
- current_time_ms() - start_time_ms, procid, flag_threaded, flag_collide,
- flag_collect_cover, flag_comparisons, flag_dedup_cover, syscall_timeout_ms,
+ current_time_ms() - start_time_ms, procid, flag_threaded, flag_collect_cover,
+ flag_comparisons, flag_dedup_cover, flag_collect_signal, syscall_timeout_ms,
program_timeout_ms, slowdown_scale, req.prog_size, flag_coverage_filter);
if (syscall_timeout_ms == 0 || program_timeout_ms <= syscall_timeout_ms || slowdown_scale == 0)
failmsg("bad timeouts", "syscall=%llu, program=%llu, scale=%llu",
@@ -689,6 +687,11 @@ void receive_execute()
failmsg("bad input size", "size=%lld, want=%lld", pos, req.prog_size);
}
+bool cover_collection_required()
+{
+ return flag_coverage && (flag_collect_signal || flag_collect_cover || flag_comparisons);
+}
+
#if GOOS_akaros
void resend_execute(int fd)
{
@@ -718,7 +721,7 @@ void realloc_output_data()
mmap_output(kMaxOutputComparisons);
else if (flag_collect_cover)
mmap_output(kMaxOutputCoverage);
- else if (flag_coverage)
+ else if (flag_collect_signal)
mmap_output(kMaxOutputSignal);
if (close(kOutFd) < 0)
fail("failed to close kOutFd");
@@ -729,21 +732,15 @@ void realloc_output_data()
// execute_one executes program stored in input_data.
void execute_one()
{
- // Duplicate global collide variable on stack.
- // Fuzzer once come up with ioctl(fd, FIONREAD, 0x920000),
- // where 0x920000 was exactly collide address, so every iteration reset collide to 0.
- bool colliding = false;
#if SYZ_EXECUTOR_USES_SHMEM
realloc_output_data();
output_pos = output_data;
write_output(0); // Number of executed syscalls (updated later).
#endif
uint64 start = current_time_ms();
-
-retry:
uint64* input_pos = (uint64*)input_data;
- if (flag_coverage && !colliding) {
+ if (cover_collection_required()) {
if (!flag_threaded)
cover_enable(&threads[0].cov, flag_comparisons, false);
if (flag_extra_coverage)
@@ -753,7 +750,6 @@ retry:
int call_index = 0;
uint64 prog_extra_timeout = 0;
uint64 prog_extra_cover_timeout = 0;
- bool has_fault_injection = false;
call_props_t call_props;
memset(&call_props, 0, sizeof(call_props));
@@ -863,7 +859,6 @@ retry:
prog_extra_cover_timeout = std::max(prog_extra_cover_timeout, 500 * slowdown_scale);
if (strncmp(syscalls[call_num].name, "syz_80211_inject_frame", strlen("syz_80211_inject_frame")) == 0)
prog_extra_cover_timeout = std::max(prog_extra_cover_timeout, 300 * slowdown_scale);
- has_fault_injection |= (call_props.fail_nth > 0);
uint64 copyout_index = read_input(&input_pos);
uint64 num_args = read_input(&input_pos);
if (num_args > kMaxArgs)
@@ -873,12 +868,13 @@ retry:
args[i] = read_arg(&input_pos);
for (uint64 i = num_args; i < kMaxArgs; i++)
args[i] = 0;
- thread_t* th = schedule_call(call_index++, call_num, colliding, copyout_index,
+ thread_t* th = schedule_call(call_index++, call_num, copyout_index,
num_args, args, input_pos, call_props);
- if (colliding && (call_index % 2) == 0) {
- // Don't wait for every other call.
- // We already have results from the previous execution.
+ if (call_props.async) {
+ if (!flag_threaded)
+ fail("SYZFAIL: unable to do an async call in a non-threaded mode");
+ // Don't wait for an async call to finish. We'll wait at the end.
} else if (flag_threaded) {
// Wait for call completion.
uint64 timeout_ms = syscall_timeout_ms + call->attrs.timeout * slowdown_scale;
@@ -907,7 +903,7 @@ retry:
memset(&call_props, 0, sizeof(call_props));
}
- if (!colliding && !collide && running > 0) {
+ if (running > 0) {
// Give unfinished syscalls some additional time.
last_scheduled = 0;
uint64 wait_start = current_time_ms();
@@ -927,7 +923,7 @@ retry:
for (int i = 0; i < kMaxThreads; i++) {
thread_t* th = &threads[i];
if (th->executing) {
- if (flag_coverage)
+ if (cover_collection_required())
cover_collect(&th->cov);
write_call_output(th, false);
}
@@ -939,33 +935,25 @@ retry:
close_fds();
#endif
- if (!colliding && !collide) {
+ write_extra_output();
+ // Check for new extra coverage in small intervals to avoid situation
+ // that we were killed on timeout before we write any.
+ // Check for extra coverage is very cheap, effectively a memory load.
+ const uint64 kSleepMs = 100;
+ for (uint64 i = 0; i < prog_extra_cover_timeout / kSleepMs; i++) {
+ sleep_ms(kSleepMs);
write_extra_output();
- // Check for new extra coverage in small intervals to avoid situation
- // that we were killed on timeout before we write any.
- // Check for extra coverage is very cheap, effectively a memory load.
- const uint64 kSleepMs = 100;
- for (uint64 i = 0; i < prog_extra_cover_timeout / kSleepMs; i++) {
- sleep_ms(kSleepMs);
- write_extra_output();
- }
- }
-
- if (flag_collide && !colliding && !has_fault_injection && !collide) {
- debug("enabling collider\n");
- collide = colliding = true;
- goto retry;
}
}
-thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, call_props_t call_props)
+thread_t* schedule_call(int call_index, int call_num, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, call_props_t call_props)
{
// Find a spare thread to execute the call.
int i = 0;
for (; i < kMaxThreads; i++) {
thread_t* th = &threads[i];
if (!th->created)
- thread_create(th, i, flag_coverage && !colliding);
+ thread_create(th, i, cover_collection_required());
if (event_isset(&th->done)) {
if (th->executing)
handle_completion(th);
@@ -979,7 +967,6 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
failmsg("bad thread state in schedule", "ready=%d done=%d executing=%d",
event_isset(&th->ready), event_isset(&th->done), th->executing);
last_scheduled = th;
- th->colliding = colliding;
th->copyout_pos = pos;
th->copyout_index = copyout_index;
event_reset(&th->done);
@@ -1002,44 +989,46 @@ void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover
// Write out feedback signals.
// Currently it is code edges computed as xor of two subsequent basic block PCs.
cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
- uint32 nsig = 0;
- cover_data_t prev_pc = 0;
- bool prev_filter = true;
- for (uint32 i = 0; i < cov->size; i++) {
- cover_data_t pc = cover_data[i] + cov->pc_offset;
- uint32 sig = pc;
- if (use_cover_edges(pc))
- sig ^= hash(prev_pc);
- bool filter = coverage_filter(pc);
- // Ignore the edge only if both current and previous PCs are filtered out
+ // to capture all incoming and outgoing edges into the interesting code.
- bool ignore = !filter && !prev_filter;
- prev_pc = pc;
- prev_filter = filter;
- if (ignore || dedup(sig))
- continue;
- write_output(sig);
- nsig++;
+ if (flag_collect_signal) {
+ uint32 nsig = 0;
+ cover_data_t prev_pc = 0;
+ bool prev_filter = true;
+ for (uint32 i = 0; i < cov->size; i++) {
+ cover_data_t pc = cover_data[i] + cov->pc_offset;
+ uint32 sig = pc;
+ if (use_cover_edges(pc))
+ sig ^= hash(prev_pc);
+ bool filter = coverage_filter(pc);
+ // Ignore the edge only if both current and previous PCs are filtered out
+ // to capture all incoming and outgoing edges into the interesting code.
+ bool ignore = !filter && !prev_filter;
+ prev_pc = pc;
+ prev_filter = filter;
+ if (ignore || dedup(sig))
+ continue;
+ write_output(sig);
+ nsig++;
+ }
+ // Write out number of signals.
+ *signal_count_pos = nsig;
}
- // Write out number of signals.
- *signal_count_pos = nsig;
- if (!flag_collect_cover)
- return;
- // Write out real coverage (basic block PCs).
- uint32 cover_size = cov->size;
- if (flag_dedup_cover) {
- cover_data_t* end = cover_data + cover_size;
- cover_unprotect(cov);
- std::sort(cover_data, end);
- cover_size = std::unique(cover_data, end) - cover_data;
- cover_protect(cov);
+ if (flag_collect_cover) {
+ // Write out real coverage (basic block PCs).
+ uint32 cover_size = cov->size;
+ if (flag_dedup_cover) {
+ cover_data_t* end = cover_data + cover_size;
+ cover_unprotect(cov);
+ std::sort(cover_data, end);
+ cover_size = std::unique(cover_data, end) - cover_data;
+ cover_protect(cov);
+ }
+ // Truncate PCs to uint32 assuming that they fit into 32-bits.
+ // True for x86_64 and arm64 without KASLR.
+ for (uint32 i = 0; i < cover_size; i++)
+ write_output(cover_data[i] + cov->pc_offset);
+ *cover_count_pos = cover_size;
}
- // Truncate PCs to uint32 assuming that they fit into 32-bits.
- // True for x86_64 and arm64 without KASLR.
- for (uint32 i = 0; i < cover_size; i++)
- write_output(cover_data[i] + cov->pc_offset);
- *cover_count_pos = cover_size;
}
#endif
@@ -1050,21 +1039,20 @@ void handle_completion(thread_t* th)
event_isset(&th->ready), event_isset(&th->done), th->executing);
if (th->res != (intptr_t)-1)
copyout_call_results(th);
- if (!collide && !th->colliding) {
- write_call_output(th, true);
- write_extra_output();
- }
+
+ write_call_output(th, true);
+ write_extra_output();
th->executing = false;
running--;
if (running < 0) {
// This fires periodically for the past 2 years (see issue #502).
- fprintf(stderr, "running=%d collide=%d completed=%d flag_threaded=%d flag_collide=%d current=%d\n",
- running, collide, completed, flag_threaded, flag_collide, th->id);
+ fprintf(stderr, "running=%d completed=%d flag_threaded=%d current=%d\n",
+ running, completed, flag_threaded, th->id);
for (int i = 0; i < kMaxThreads; i++) {
thread_t* th1 = &threads[i];
- fprintf(stderr, "th #%2d: created=%d executing=%d colliding=%d"
+ fprintf(stderr, "th #%2d: created=%d executing=%d"
" ready=%d done=%d call_index=%d res=%lld reserrno=%d\n",
- i, th1->created, th1->executing, th1->colliding,
+ i, th1->created, th1->executing,
event_isset(&th1->ready), event_isset(&th1->done),
th1->call_index, (uint64)th1->res, th1->reserrno);
}
@@ -1143,7 +1131,7 @@ void write_call_output(thread_t* th, bool finished)
}
// Write out number of comparisons.
*comps_count_pos = comps_size;
- } else if (flag_coverage) {
+ } else if (flag_collect_signal || flag_collect_cover) {
if (is_kernel_64_bit)
write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
else
@@ -1176,7 +1164,7 @@ void write_call_output(thread_t* th, bool finished)
void write_extra_output()
{
#if SYZ_EXECUTOR_USES_SHMEM
- if (!flag_coverage || !flag_extra_coverage || flag_comparisons)
+ if (!cover_collection_required() || !flag_extra_coverage || flag_comparisons)
return;
cover_collect(&extra_cov);
if (!extra_cov.size)
@@ -1230,7 +1218,7 @@ void* worker_thread(void* arg)
{
thread_t* th = (thread_t*)arg;
current_thread = th;
- if (flag_coverage)
+ if (cover_collection_required())
cover_enable(&th->cov, flag_comparisons, false);
for (;;) {
event_wait(&th->ready);
@@ -1256,8 +1244,6 @@ void execute_call(thread_t* th)
int fail_fd = -1;
th->soft_fail_state = false;
if (th->call_props.fail_nth > 0) {
- if (collide)
- fail("both collide and fault injection are enabled");
fail_fd = inject_fault(th->call_props.fail_nth);
th->soft_fail_state = true;
}
diff --git a/pkg/csource/common.go b/pkg/csource/common.go
index a1525e24f..7e396931c 100644
--- a/pkg/csource/common.go
+++ b/pkg/csource/common.go
@@ -91,23 +91,23 @@ func defineList(p, mmapProg *prog.Prog, opts Options) (defines []string) {
func commonDefines(p *prog.Prog, opts Options) map[string]bool {
sysTarget := targets.Get(p.Target.OS, p.Target.Arch)
- bitmasks, csums := prog.RequiredFeatures(p)
+ features := p.RequiredFeatures()
return map[string]bool{
"GOOS_" + p.Target.OS: true,
"GOARCH_" + p.Target.Arch: true,
"HOSTGOOS_" + runtime.GOOS: true,
- "SYZ_USE_BITMASKS": bitmasks,
- "SYZ_USE_CHECKSUMS": csums,
+ "SYZ_USE_BITMASKS": features.Bitmasks,
+ "SYZ_USE_CHECKSUMS": features.Csums,
"SYZ_SANDBOX_NONE": opts.Sandbox == sandboxNone,
"SYZ_SANDBOX_SETUID": opts.Sandbox == sandboxSetuid,
"SYZ_SANDBOX_NAMESPACE": opts.Sandbox == sandboxNamespace,
"SYZ_SANDBOX_ANDROID": opts.Sandbox == sandboxAndroid,
"SYZ_THREADED": opts.Threaded,
- "SYZ_COLLIDE": opts.Collide,
+ "SYZ_ASYNC": features.Async,
"SYZ_REPEAT": opts.Repeat,
"SYZ_REPEAT_TIMES": opts.RepeatTimes > 1,
"SYZ_MULTI_PROC": opts.Procs > 1,
- "SYZ_FAULT": p.HasFaultInjection(),
+ "SYZ_FAULT": features.FaultInjection,
"SYZ_LEAK": opts.Leak,
"SYZ_NET_INJECTION": opts.NetInjection,
"SYZ_NET_DEVICES": opts.NetDevices,
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go
index 68f961526..32e01ff1e 100644
--- a/pkg/csource/csource.go
+++ b/pkg/csource/csource.go
@@ -115,6 +115,17 @@ func (ctx *context) generateSource() ([]byte, error) {
}
}
replacements["CALL_TIMEOUT_MS"] = timeoutExpr
+ if ctx.p.RequiredFeatures().Async {
+ conditions := []string{}
+ for idx, call := range ctx.p.Calls {
+ if !call.Props.Async {
+ continue
+ }
+ conditions = append(conditions, fmt.Sprintf("call == %v", idx))
+ }
+ replacements["ASYNC_CONDITIONS"] = strings.Join(conditions, " || ")
+ }
+
result, err := createCommonHeader(ctx.p, mmapProg, replacements, ctx.opts)
if err != nil {
return nil, err
diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go
index aa242e876..885d75f67 100644
--- a/pkg/csource/csource_test.go
+++ b/pkg/csource/csource_test.go
@@ -70,10 +70,13 @@ func testTarget(t *testing.T, target *prog.Target, full bool) {
p.Calls = append(p.Calls, minimized.Calls...)
opts = allOptionsPermutations(target.OS)
}
+ // Test fault injection and async call generation as well.
if len(p.Calls) > 0 {
- // Test fault injection code generation as well.
p.Calls[0].Props.FailNth = 1
}
+ if len(p.Calls) > 1 {
+ p.Calls[1].Props.Async = true
+ }
for opti, opts := range opts {
if testing.Short() && opts.HandleSegv {
// HandleSegv can radically increase compilation time/memory consumption on large programs.
diff --git a/pkg/csource/generated.go b/pkg/csource/generated.go
index 728806967..851bf935a 100644
--- a/pkg/csource/generated.go
+++ b/pkg/csource/generated.go
@@ -10457,10 +10457,6 @@ static void loop(void)
fprintf(stderr, "### start\n");
#endif
int i, call, thread;
-#if SYZ_COLLIDE
- int collide = 0;
-again:
-#endif
for (call = 0; call < /*{{{NUM_CALLS}}}*/; call++) {
for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0])); thread++) {
struct thread_t* th = &threads[thread];
@@ -10477,8 +10473,8 @@ again:
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
event_set(&th->ready);
-#if SYZ_COLLIDE
- if (collide && (call % 2) == 0)
+#if SYZ_ASYNC
+ if (/*{{{ASYNC_CONDITIONS}}}*/)
break;
#endif
event_timedwait(&th->done, /*{{{CALL_TIMEOUT_MS}}}*/);
@@ -10490,12 +10486,6 @@ again:
#if SYZ_HAVE_CLOSE_FDS
close_fds();
#endif
-#if SYZ_COLLIDE
- if (!collide) {
- collide = 1;
- goto again;
- }
-#endif
}
#endif
diff --git a/pkg/csource/options.go b/pkg/csource/options.go
index 36490c8b8..3fc549282 100644
--- a/pkg/csource/options.go
+++ b/pkg/csource/options.go
@@ -19,7 +19,6 @@ import (
// Dashboard also provides serialized Options along with syzkaller reproducers.
type Options struct {
Threaded bool `json:"threaded,omitempty"`
- Collide bool `json:"collide,omitempty"`
Repeat bool `json:"repeat,omitempty"`
RepeatTimes int `json:"repeat_times,omitempty"` // if non-0, repeat that many times
Procs int `json:"procs"`
@@ -55,6 +54,7 @@ type Options struct {
// These are legacy options, they remain only for the sake of backward compatibility.
type LegacyOptions struct {
+ Collide bool `json:"collide,omitempty"`
Fault bool `json:"fault,omitempty"`
FaultCall int `json:"fault_call,omitempty"`
FaultNth int `json:"fault_nth,omitempty"`
@@ -158,7 +158,6 @@ func (opts Options) checkLinuxOnly(OS string) error {
func DefaultOpts(cfg *mgrconfig.Config) Options {
opts := Options{
Threaded: true,
- Collide: true,
Repeat: true,
Procs: cfg.Procs,
Slowdown: cfg.Timeouts.Slowdown,
@@ -322,7 +321,6 @@ func PrintAvailableFeaturesFlags() {
// This is the main configuration used by executor, only for testing.
var ExecutorOpts = Options{
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 2,
Slowdown: 1,
diff --git a/pkg/csource/options_test.go b/pkg/csource/options_test.go
index ba31e4c95..5e971a9d1 100644
--- a/pkg/csource/options_test.go
+++ b/pkg/csource/options_test.go
@@ -34,7 +34,6 @@ func TestParseOptionsCanned(t *testing.T) {
"netdev":true,"resetnet":true,
"segv":true,"waitrepeat":true,"debug":true,"repro":true}`: {
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 10,
Slowdown: 1,
@@ -49,6 +48,7 @@ func TestParseOptionsCanned(t *testing.T) {
HandleSegv: true,
Repro: true,
LegacyOptions: LegacyOptions{
+ Collide: true,
Fault: true,
FaultCall: 1,
FaultNth: 2,
@@ -59,7 +59,6 @@ func TestParseOptionsCanned(t *testing.T) {
"netdev":true,"resetnet":true,
"segv":true,"waitrepeat":true,"debug":true,"repro":true}`: {
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 10,
Slowdown: 1,
@@ -74,6 +73,7 @@ func TestParseOptionsCanned(t *testing.T) {
HandleSegv: true,
Repro: true,
LegacyOptions: LegacyOptions{
+ Collide: true,
Fault: true,
FaultCall: 1,
FaultNth: 2,
@@ -81,7 +81,6 @@ func TestParseOptionsCanned(t *testing.T) {
},
"{Threaded:true Collide:true Repeat:true Procs:1 Sandbox:none Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 1,
Slowdown: 1,
@@ -94,6 +93,7 @@ func TestParseOptionsCanned(t *testing.T) {
HandleSegv: true,
Repro: false,
LegacyOptions: LegacyOptions{
+ Collide: true,
Fault: false,
FaultCall: -1,
FaultNth: 0,
@@ -101,7 +101,6 @@ func TestParseOptionsCanned(t *testing.T) {
},
"{Threaded:true Collide:true Repeat:true Procs:1 Sandbox: Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 1,
Slowdown: 1,
@@ -114,6 +113,7 @@ func TestParseOptionsCanned(t *testing.T) {
HandleSegv: true,
Repro: false,
LegacyOptions: LegacyOptions{
+ Collide: true,
Fault: false,
FaultCall: -1,
FaultNth: 0,
@@ -121,7 +121,6 @@ func TestParseOptionsCanned(t *testing.T) {
},
"{Threaded:false Collide:true Repeat:true Procs:1 Sandbox:namespace Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true EnableCgroups:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: false,
- Collide: true,
Repeat: true,
Procs: 1,
Slowdown: 1,
@@ -134,6 +133,7 @@ func TestParseOptionsCanned(t *testing.T) {
HandleSegv: true,
Repro: false,
LegacyOptions: LegacyOptions{
+ Collide: true,
Fault: false,
FaultCall: -1,
FaultNth: 0,
diff --git a/pkg/instance/instance_test.go b/pkg/instance/instance_test.go
index 5b0bcd4e6..e27361091 100644
--- a/pkg/instance/instance_test.go
+++ b/pkg/instance/instance_test.go
@@ -91,12 +91,15 @@ func TestExecprogCmd(t *testing.T) {
flagFaultNth := flags.Int("fault_nth", 0, "inject fault on n-th operation (0-based)")
flagExecutor := flags.String("executor", "./syz-executor", "path to executor binary")
flagThreaded := flags.Bool("threaded", true, "use threaded mode in executor")
- flagCollide := flags.Bool("collide", true, "collide syscalls to provoke data races")
+ // In the older syzkaller versions `collide` flag defaulted to `true`, but in this
+ // test we can change it to false (new default), because syzkaller always explicitly
+ // sets this flag and never relies on the default value.
+ flagCollide := flags.Bool("collide", false, "collide syscalls to provoke data races")
flagSignal := flags.Bool("cover", false, "collect feedback signals (coverage)")
flagSandbox := flags.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace)")
flagSlowdown := flags.Int("slowdown", 1, "")
cmdLine := ExecprogCmd(os.Args[0], "/myexecutor", targets.FreeBSD, targets.I386,
- "namespace", true, false, false, 7, 2, 3, true, 10, "myprog")
+ "namespace", true, false, true, 7, 2, 3, true, 10, "myprog")
args := strings.Split(cmdLine, " ")[1:]
if err := tool.ParseFlags(flags, args); err != nil {
t.Fatal(err)
@@ -134,8 +137,8 @@ func TestExecprogCmd(t *testing.T) {
if *flagThreaded {
t.Errorf("bad threaded: %v, want: %v", *flagThreaded, false)
}
- if *flagCollide {
- t.Errorf("bad collide: %v, want: %v", *flagCollide, false)
+ if !*flagCollide {
+ t.Errorf("bad collide: %v, want: %v", *flagCollide, true)
}
if *flagSlowdown != 10 {
t.Errorf("bad slowdown: %v, want: %v", *flagSlowdown, 10)
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
index 03b28e4ce..5bf4738ca 100644
--- a/pkg/ipc/ipc.go
+++ b/pkg/ipc/ipc.go
@@ -49,11 +49,11 @@ const (
type ExecFlags uint64
const (
- FlagCollectCover ExecFlags = 1 << iota // collect coverage
+ FlagCollectSignal ExecFlags = 1 << iota // collect feedback signals
+ FlagCollectCover // collect coverage
FlagDedupCover // deduplicate coverage in executor
FlagCollectComps // collect KCOV comparisons
FlagThreaded // use multiple threads to mitigate blocked syscalls
- FlagCollide // collide syscalls to provoke data races
FlagEnableCoverageFilter // setup and use bitmap to do coverage filter
)
diff --git a/pkg/ipc/ipc_test.go b/pkg/ipc/ipc_test.go
index 44fdb67bd..bb110e388 100644
--- a/pkg/ipc/ipc_test.go
+++ b/pkg/ipc/ipc_test.go
@@ -87,7 +87,7 @@ func TestExecute(t *testing.T) {
bin := buildExecutor(t, target)
defer os.Remove(bin)
- flags := []ExecFlags{0, FlagThreaded, FlagThreaded | FlagCollide}
+ flags := []ExecFlags{0, FlagThreaded}
for _, flag := range flags {
t.Logf("testing flags 0x%x\n", flag)
cfg := &Config{
diff --git a/pkg/ipc/ipcconfig/ipcconfig.go b/pkg/ipc/ipcconfig/ipcconfig.go
index 3791322f2..5be4d4b39 100644
--- a/pkg/ipc/ipcconfig/ipcconfig.go
+++ b/pkg/ipc/ipcconfig/ipcconfig.go
@@ -14,7 +14,6 @@ import (
var (
flagExecutor = flag.String("executor", "./syz-executor", "path to executor binary")
flagThreaded = flag.Bool("threaded", true, "use threaded mode in executor")
- flagCollide = flag.Bool("collide", true, "collide syscalls to provoke data races")
flagSignal = flag.Bool("cover", false, "collect feedback signals (coverage)")
flagSandbox = flag.String("sandbox", "none", "sandbox for fuzzing (none/setuid/namespace/android)")
flagDebug = flag.Bool("debug", false, "debug output from executor")
@@ -46,8 +45,8 @@ func Default(target *prog.Target) (*ipc.Config, *ipc.ExecOpts, error) {
if *flagThreaded {
opts.Flags |= ipc.FlagThreaded
}
- if *flagCollide {
- opts.Flags |= ipc.FlagCollide
+ if *flagSignal {
+ opts.Flags |= ipc.FlagCollectSignal
}
return c, opts, nil
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go
index 8eebb5c6d..06afec8c2 100644
--- a/pkg/repro/repro.go
+++ b/pkg/repro/repro.go
@@ -442,7 +442,7 @@ func (ctx *context) minimizeProg(res *Result) (*Result, error) {
return res, nil
}
-// Simplify repro options (threaded, collide, sandbox, etc).
+// Simplify repro options (threaded, sandbox, etc).
func (ctx *context) simplifyProg(res *Result) (*Result, error) {
ctx.reproLogf(2, "simplifying guilty program options")
start := time.Now()
@@ -784,13 +784,6 @@ type Simplify func(opts *csource.Options) bool
var progSimplifies = []Simplify{
func(opts *csource.Options) bool {
- if !opts.Collide {
- return false
- }
- opts.Collide = false
- return true
- },
- func(opts *csource.Options) bool {
if opts.Collide || !opts.Threaded {
return false
}
diff --git a/pkg/repro/repro_test.go b/pkg/repro/repro_test.go
index 9b639cd7d..d02ea6d67 100644
--- a/pkg/repro/repro_test.go
+++ b/pkg/repro/repro_test.go
@@ -79,7 +79,6 @@ func TestBisect(t *testing.T) {
func TestSimplifies(t *testing.T) {
opts := csource.Options{
Threaded: true,
- Collide: true,
Repeat: true,
Procs: 10,
Sandbox: "namespace",
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index 1d3412cb0..87634a3dc 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -403,7 +403,7 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
}
cfg.Flags |= sandboxFlags
if threaded {
- opts.Flags |= ipc.FlagThreaded | ipc.FlagCollide
+ opts.Flags |= ipc.FlagThreaded
}
if cov {
cfg.Flags |= ipc.FlagSignal
@@ -447,7 +447,6 @@ func (ctx *Context) createSyzTest(p *prog.Prog, sandbox string, threaded, cov bo
func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, times int) (*RunRequest, error) {
opts := csource.Options{
Threaded: threaded,
- Collide: false,
Repeat: times > 1,
RepeatTimes: times,
Procs: 1,
@@ -485,7 +484,7 @@ func (ctx *Context) createCTest(p *prog.Prog, sandbox string, threaded bool, tim
}
var ipcFlags ipc.ExecFlags
if threaded {
- ipcFlags |= ipc.FlagThreaded | ipc.FlagCollide
+ ipcFlags |= ipc.FlagThreaded
}
req := &RunRequest{
P: p,
diff --git a/prog/analysis.go b/prog/analysis.go
index 6643941ff..697e1eab5 100644
--- a/prog/analysis.go
+++ b/prog/analysis.go
@@ -160,29 +160,34 @@ func foreachArgImpl(arg Arg, ctx *ArgCtx, f func(Arg, *ArgCtx)) {
}
}
-func RequiredFeatures(p *Prog) (bitmasks, csums bool) {
+type RequiredFeatures struct {
+ Bitmasks bool
+ Csums bool
+ FaultInjection bool
+ Async bool
+}
+
+func (p *Prog) RequiredFeatures() RequiredFeatures {
+ features := RequiredFeatures{}
for _, c := range p.Calls {
ForeachArg(c, func(arg Arg, _ *ArgCtx) {
if a, ok := arg.(*ConstArg); ok {
if a.Type().BitfieldOffset() != 0 || a.Type().BitfieldLength() != 0 {
- bitmasks = true
+ features.Bitmasks = true
}
}
if _, ok := arg.Type().(*CsumType); ok {
- csums = true
+ features.Csums = true
}
})
- }
- return
-}
-
-func (p *Prog) HasFaultInjection() bool {
- for _, call := range p.Calls {
- if call.Props.FailNth > 0 {
- return true
+ if c.Props.FailNth > 0 {
+ features.FaultInjection = true
+ }
+ if c.Props.Async {
+ features.Async = true
}
}
- return false
+ return features
}
type CallFlags int
diff --git a/prog/collide.go b/prog/collide.go
new file mode 100644
index 000000000..cd059c60f
--- /dev/null
+++ b/prog/collide.go
@@ -0,0 +1,57 @@
+// Copyright 2021 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// Contains prog transformations that are intended to provoke more races.
+
+package prog
+
+import "math/rand"
+
+// The executor has no more than 32 threads that are used both for async calls and for calls
+// that timed out. If we just ignore that limit, we could end up generating programs that
+// would force the executor to fail and thus stall the fuzzing process.
+// As an educated guess, let's use no more than 24 async calls to let the executor handle everything.
+const maxAsyncPerProg = 24
+
+// Ensures that if an async call produces a resource, then
+// it is separated from the call consuming that resource by
+// at least one non-async call.
+// This does not give 100% guarantee that the async call finishes
+// by that time, but hopefully this is enough for most cases.
+func AssignRandomAsync(origProg *Prog, rand *rand.Rand) *Prog {
+ var unassigned map[*ResultArg]bool
+ leftAsync := maxAsyncPerProg
+ prog := origProg.Clone()
+ for i := len(prog.Calls) - 1; i >= 0 && leftAsync > 0; i-- {
+ call := prog.Calls[i]
+ producesUnassigned := false
+ consumes := make(map[*ResultArg]bool)
+ ForeachArg(call, func(arg Arg, ctx *ArgCtx) {
+ res, ok := arg.(*ResultArg)
+ if !ok {
+ return
+ }
+ if res.Dir() != DirIn && unassigned[res] {
+ // If this call is made async, at least one of the resources
+ // will be empty when it's needed.
+ producesUnassigned = true
+ }
+ if res.Dir() != DirOut {
+ consumes[res.Res] = true
+ }
+ })
+ // Make async with a 66% chance (but never the last call).
+ if !producesUnassigned && i+1 != len(prog.Calls) && rand.Intn(3) != 0 {
+ call.Props.Async = true
+ for res := range consumes {
+ unassigned[res] = true
+ }
+ leftAsync--
+ } else {
+ call.Props.Async = false
+ unassigned = consumes
+ }
+ }
+
+ return prog
+}
diff --git a/prog/collide_test.go b/prog/collide_test.go
new file mode 100644
index 000000000..614b677ef
--- /dev/null
+++ b/prog/collide_test.go
@@ -0,0 +1,84 @@
+// Copyright 2021 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package prog
+
+import (
+ "math/rand"
+ "testing"
+)
+
+func TestAssignRandomAsync(t *testing.T) {
+ tests := []struct {
+ os string
+ arch string
+ orig string
+ check func(*Prog) bool
+ }{
+ {
+ "linux", "amd64",
+ `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff)
+write(r0, &AUTO="01010101", 0x4)
+read(r0, &AUTO=""/4, 0x4)
+close(r0)
+`,
+ func(p *Prog) bool {
+ return !p.Calls[0].Props.Async
+ },
+ },
+ {
+ "linux", "amd64",
+ `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff)
+nanosleep(&AUTO={0x0,0x4C4B40}, &AUTO={0,0})
+write(r0, &AUTO="01010101", 0x4)
+read(r0, &AUTO=""/4, 0x4)
+close(r0)
+`,
+ func(p *Prog) bool {
+ return !p.Calls[0].Props.Async || !p.Calls[1].Props.Async
+ },
+ },
+ {
+ "linux", "amd64",
+ `r0 = openat(0xffffffffffffff9c, &AUTO='./file1\x00', 0x42, 0x1ff)
+r1 = dup(r0)
+r2 = dup(r1)
+r3 = dup(r2)
+r4 = dup(r3)
+`,
+ func(p *Prog) bool {
+ for _, call := range p.Calls[0 : len(p.Calls)-1] {
+ if call.Props.Async {
+ return false
+ }
+ }
+ return true
+ },
+ },
+ }
+ _, rs, iters := initTest(t)
+ r := rand.New(rs)
+ anyAsync := false
+ for _, test := range tests {
+ target, err := GetTarget(test.os, test.arch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p, err := target.Deserialize([]byte(test.orig), Strict)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < iters; i++ {
+ collided := AssignRandomAsync(p, r)
+ if !test.check(collided) {
+ t.Fatalf("bad async assignment:\n%s\n", collided.Serialize())
+ }
+ for _, call := range collided.Calls {
+ anyAsync = anyAsync || call.Props.Async
+ }
+ }
+ }
+ if !anyAsync {
+ t.Fatalf("not a single async was assigned")
+ }
+}
diff --git a/prog/decodeexec.go b/prog/decodeexec.go
index de62879f7..db89aa82e 100644
--- a/prog/decodeexec.go
+++ b/prog/decodeexec.go
@@ -145,6 +145,10 @@ func (dec *execDecoder) readCallProps(props *CallProps) {
switch kind := value.Kind(); kind {
case reflect.Int:
value.SetInt(int64(arg))
+ case reflect.Bool:
+ if arg == 1 {
+ value.SetBool(true)
+ }
default:
panic("Unsupported (yet) kind: " + kind.String())
}
diff --git a/prog/encoding.go b/prog/encoding.go
index 7c8cf6878..98dee8a8e 100644
--- a/prog/encoding.go
+++ b/prog/encoding.go
@@ -99,6 +99,7 @@ func (ctx *serializer) call(c *Call) {
switch kind := value.Kind(); kind {
case reflect.Int:
ctx.printf(": %d", value.Int())
+ case reflect.Bool:
default:
panic("unable to serialize call prop of type " + kind.String())
}
@@ -376,6 +377,8 @@ func (p *parser) parseCallProps() CallProps {
} else {
value.SetInt(intV)
}
+ case reflect.Bool:
+ value.SetBool(true)
default:
panic("unable to handle call props of type " + kind.String())
}
diff --git a/prog/encoding_test.go b/prog/encoding_test.go
index 11e71867f..e0e6fbefd 100644
--- a/prog/encoding_test.go
+++ b/prog/encoding_test.go
@@ -421,7 +421,7 @@ func TestSerializeCallProps(t *testing.T) {
},
{
"serialize0(0x0) (fail_nth: 5)\n",
- []CallProps{{5}},
+ []CallProps{{5, false}},
},
{
"serialize0(0x0) (fail_nth)\n",
@@ -431,6 +431,10 @@ func TestSerializeCallProps(t *testing.T) {
"serialize0(0x0) (fail_nth: \"5\")\n",
nil,
},
+ {
+ "serialize0(0x0) (async)\n",
+ []CallProps{{0, true}},
+ },
}
for _, test := range tests {
diff --git a/prog/encodingexec.go b/prog/encodingexec.go
index fea114717..44a49fc58 100644
--- a/prog/encodingexec.go
+++ b/prog/encodingexec.go
@@ -134,12 +134,18 @@ type argInfo struct {
func (w *execContext) writeCallProps(props CallProps) {
w.write(execInstrSetProps)
props.ForeachProp(func(_, _ string, value reflect.Value) {
+ var uintVal uint64
switch kind := value.Kind(); kind {
case reflect.Int:
- w.write(uint64(value.Int()))
+ uintVal = uint64(value.Int())
+ case reflect.Bool:
+ if value.Bool() {
+ uintVal = 1
+ }
default:
panic("Unsupported (yet) kind: " + kind.String())
}
+ w.write(uintVal)
})
}
diff --git a/prog/encodingexec_test.go b/prog/encodingexec_test.go
index 5e0f73ce6..fe6a4dfb4 100644
--- a/prog/encodingexec_test.go
+++ b/prog/encodingexec_test.go
@@ -465,11 +465,14 @@ func TestSerializeForExec(t *testing.T) {
{
`test() (fail_nth: 3)
test() (fail_nth: 4)
+test() (async)
`,
[]uint64{
- execInstrSetProps, 3,
+ execInstrSetProps, 3, 0,
callID("test"), ExecNoCopyout, 0,
- execInstrSetProps, 4,
+ execInstrSetProps, 4, 0,
+ callID("test"), ExecNoCopyout, 0,
+ execInstrSetProps, 0, 1,
callID("test"), ExecNoCopyout, 0,
execInstrEOF,
},
@@ -478,12 +481,17 @@ test() (fail_nth: 4)
{
Meta: target.SyscallMap["test"],
Index: ExecNoCopyout,
- Props: CallProps{3},
+ Props: CallProps{3, false},
+ },
+ {
+ Meta: target.SyscallMap["test"],
+ Index: ExecNoCopyout,
+ Props: CallProps{4, false},
},
{
Meta: target.SyscallMap["test"],
Index: ExecNoCopyout,
- Props: CallProps{4},
+ Props: CallProps{0, true},
},
},
},
diff --git a/prog/minimization.go b/prog/minimization.go
index 60a715b66..89ed6e142 100644
--- a/prog/minimization.go
+++ b/prog/minimization.go
@@ -5,6 +5,7 @@ package prog
import (
"fmt"
+ "reflect"
)
// Minimize minimizes program p into an equivalent program using the equivalence
@@ -28,6 +29,9 @@ func Minimize(p0 *Prog, callIndex0 int, crash bool, pred0 func(*Prog, int) bool)
// Try to remove all calls except the last one one-by-one.
p0, callIndex0 = removeCalls(p0, callIndex0, crash, pred)
+ // Try to reset all call props to their default values.
+ p0 = resetCallProps(p0, callIndex0, pred)
+
// Try to minimize individual calls.
for i := 0; i < len(p0.Calls); i++ {
ctx := &minimizeArgsCtx{
@@ -78,6 +82,23 @@ func removeCalls(p0 *Prog, callIndex0 int, crash bool, pred func(*Prog, int) boo
return p0, callIndex0
}
+func resetCallProps(p0 *Prog, callIndex0 int, pred func(*Prog, int) bool) *Prog {
+ // Try to reset all call props to their default values.
+ // This should be reasonable for many progs.
+ p := p0.Clone()
+ anyDifferent := false
+ for idx := range p.Calls {
+ if !reflect.DeepEqual(p.Calls[idx].Props, CallProps{}) {
+ p.Calls[idx].Props = CallProps{}
+ anyDifferent = true
+ }
+ }
+ if anyDifferent && pred(p, callIndex0) {
+ return p
+ }
+ return p0
+}
+
func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, int) bool) *Prog {
props := p0.Calls[callIndex].Props
@@ -90,6 +111,15 @@ func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, int
}
}
+ // Try to drop async.
+ if props.Async {
+ p := p0.Clone()
+ p.Calls[callIndex].Props.Async = false
+ if pred(p, callIndex0) {
+ p0 = p
+ }
+ }
+
return p0
}
diff --git a/prog/minimization_test.go b/prog/minimization_test.go
index 032b2b080..cf499b7f1 100644
--- a/prog/minimization_test.go
+++ b/prog/minimization_test.go
@@ -171,6 +171,28 @@ func TestMinimize(t *testing.T) {
"pipe2(0x0, 0x0) (fail_nth: 5)\n",
-1,
},
+ // Clear unneeded async flag.
+ {
+ "linux", "amd64",
+ "pipe2(0x0, 0x0) (async)\n",
+ -1,
+ func(p *Prog, callIndex int) bool {
+ return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2"
+ },
+ "pipe2(0x0, 0x0)\n",
+ -1,
+ },
+ // Keep important async flag.
+ {
+ "linux", "amd64",
+ "pipe2(0x0, 0x0) (async)\n",
+ -1,
+ func(p *Prog, callIndex int) bool {
+ return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" && p.Calls[0].Props.Async
+ },
+ "pipe2(0x0, 0x0) (async)\n",
+ -1,
+ },
}
t.Parallel()
for ti, test := range tests {
diff --git a/prog/parse_test.go b/prog/parse_test.go
index 8de2e36da..eddd03873 100644
--- a/prog/parse_test.go
+++ b/prog/parse_test.go
@@ -30,7 +30,7 @@ gettid()
if ent.Proc != 0 {
t.Fatalf("proc %v, want 0", ent.Proc)
}
- if ent.P.HasFaultInjection() {
+ if ent.P.RequiredFeatures().FaultInjection {
t.Fatalf("fault injection enabled")
}
want := "getpid-gettid"
@@ -67,7 +67,7 @@ func TestParseMulti(t *testing.T) {
t.Fatalf("bad procs")
}
for i, ent := range entries {
- if ent.P.HasFaultInjection() {
+ if ent.P.RequiredFeatures().FaultInjection {
t.Fatalf("prog %v has fault injection enabled", i)
}
}
diff --git a/prog/prog.go b/prog/prog.go
index d41117a2f..09da7fdf2 100644
--- a/prog/prog.go
+++ b/prog/prog.go
@@ -19,7 +19,8 @@ type Prog struct {
// IMPORTANT: keep the exact values of "key" tag for existing props unchanged,
// otherwise the backwards compatibility would be broken.
type CallProps struct {
- FailNth int `key:"fail_nth"`
+ FailNth int `key:"fail_nth"`
+ Async bool `key:"async"`
}
type Call struct {
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
index d8961bfae..6d723967e 100644
--- a/syz-fuzzer/fuzzer.go
+++ b/syz-fuzzer/fuzzer.go
@@ -81,6 +81,7 @@ const (
StatSmash
StatHint
StatSeed
+ StatCollide
StatCount
)
@@ -93,6 +94,7 @@ var statNames = [StatCount]string{
StatSmash: "exec smash",
StatHint: "exec hints",
StatSeed: "exec seeds",
+ StatCollide: "exec collide",
}
type OutputType int
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
index 3f9b265e0..a232327de 100644
--- a/syz-fuzzer/proc.go
+++ b/syz-fuzzer/proc.go
@@ -24,14 +24,14 @@ import (
// Proc represents a single fuzzing process (executor).
type Proc struct {
- fuzzer *Fuzzer
- pid int
- env *ipc.Env
- rnd *rand.Rand
- execOpts *ipc.ExecOpts
- execOptsCover *ipc.ExecOpts
- execOptsComps *ipc.ExecOpts
- execOptsNoCollide *ipc.ExecOpts
+ fuzzer *Fuzzer
+ pid int
+ env *ipc.Env
+ rnd *rand.Rand
+ execOpts *ipc.ExecOpts
+ execOptsCollide *ipc.ExecOpts
+ execOptsCover *ipc.ExecOpts
+ execOptsComps *ipc.ExecOpts
}
func newProc(fuzzer *Fuzzer, pid int) (*Proc, error) {
@@ -40,21 +40,21 @@ func newProc(fuzzer *Fuzzer, pid int) (*Proc, error) {
return nil, err
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12))
- execOptsNoCollide := *fuzzer.execOpts
- execOptsNoCollide.Flags &= ^ipc.FlagCollide
- execOptsCover := execOptsNoCollide
+ execOptsCollide := *fuzzer.execOpts
+ execOptsCollide.Flags &= ^ipc.FlagCollectSignal
+ execOptsCover := *fuzzer.execOpts
execOptsCover.Flags |= ipc.FlagCollectCover
- execOptsComps := execOptsNoCollide
+ execOptsComps := *fuzzer.execOpts
execOptsComps.Flags |= ipc.FlagCollectComps
proc := &Proc{
- fuzzer: fuzzer,
- pid: pid,
- env: env,
- rnd: rnd,
- execOpts: fuzzer.execOpts,
- execOptsCover: &execOptsCover,
- execOptsComps: &execOptsComps,
- execOptsNoCollide: &execOptsNoCollide,
+ fuzzer: fuzzer,
+ pid: pid,
+ env: env,
+ rnd: rnd,
+ execOpts: fuzzer.execOpts,
+ execOptsCollide: &execOptsCollide,
+ execOptsCover: &execOptsCover,
+ execOptsComps: &execOptsComps,
}
return proc, nil
}
@@ -88,13 +88,13 @@ func (proc *Proc) loop() {
// Generate a new prog.
p := proc.fuzzer.target.Generate(proc.rnd, prog.RecommendedCalls, ct)
log.Logf(1, "#%v: generated", proc.pid)
- proc.execute(proc.execOpts, p, ProgNormal, StatGenerate)
+ proc.executeAndCollide(proc.execOpts, p, ProgNormal, StatGenerate)
} else {
// Mutate an existing prog.
p := fuzzerSnapshot.chooseProgram(proc.rnd).Clone()
p.Mutate(proc.rnd, prog.RecommendedCalls, ct, fuzzerSnapshot.corpus)
log.Logf(1, "#%v: mutated", proc.pid)
- proc.execute(proc.execOpts, p, ProgNormal, StatFuzz)
+ proc.executeAndCollide(proc.execOpts, p, ProgNormal, StatFuzz)
}
}
}
@@ -145,7 +145,7 @@ func (proc *Proc) triageInput(item *WorkTriage) {
item.p, item.call = prog.Minimize(item.p, item.call, false,
func(p1 *prog.Prog, call1 int) bool {
for i := 0; i < minimizeAttempts; i++ {
- info := proc.execute(proc.execOptsNoCollide, p1, ProgNormal, StatMinimize)
+ info := proc.execute(proc.execOpts, p1, ProgNormal, StatMinimize)
if !reexecutionSuccess(info, &item.info, call1) {
// The call was not executed or failed.
continue
@@ -212,7 +212,7 @@ func (proc *Proc) smashInput(item *WorkSmash) {
p := item.p.Clone()
p.Mutate(proc.rnd, prog.RecommendedCalls, proc.fuzzer.choiceTable, fuzzerSnapshot.corpus)
log.Logf(1, "#%v: smash mutated", proc.pid)
- proc.execute(proc.execOpts, p, ProgNormal, StatSmash)
+ proc.executeAndCollide(proc.execOpts, p, ProgNormal, StatSmash)
}
}
@@ -274,6 +274,23 @@ func (proc *Proc) enqueueCallTriage(p *prog.Prog, flags ProgTypes, callIndex int
})
}
+func (proc *Proc) executeAndCollide(execOpts *ipc.ExecOpts, p *prog.Prog, flags ProgTypes, stat Stat) {
+ proc.execute(execOpts, p, flags, stat)
+
+ if proc.execOptsCollide.Flags&ipc.FlagThreaded == 0 {
+ // We cannot collide syscalls without being in the threaded mode.
+ return
+ }
+ const collideIterations = 2
+ for i := 0; i < collideIterations; i++ {
+ proc.executeRaw(proc.execOptsCollide, proc.randomCollide(p), StatCollide)
+ }
+}
+
+func (proc *Proc) randomCollide(origP *prog.Prog) *prog.Prog {
+ return prog.AssignRandomAsync(origP, proc.rnd)
+}
+
func (proc *Proc) executeRaw(opts *ipc.ExecOpts, p *prog.Prog, stat Stat) *ipc.ProgInfo {
if opts.Flags&ipc.FlagDedupCover == 0 {
log.Fatalf("dedup cover is not enabled")
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index 6c69e0ff6..d22b5a0bf 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -38,6 +38,19 @@ var (
flagHints = flag.Bool("hints", false, "do a hints-generation run")
flagEnable = flag.String("enable", "none", "enable only listed additional features")
flagDisable = flag.String("disable", "none", "enable all additional features except listed")
+ // The following flag is only kept to let syzkaller remain compatible with older execprog versions.
+ // In order to test incoming patches or perform bug bisection, syz-ci must use the exact syzkaller
+ // version that detected the bug (as descriptions and syntax could've already been changed), and
+ // therefore it must be able to invoke older versions of syz-execprog.
+ // Unfortunately there's no clean way to drop that flag from newer versions of syz-execprog. If it
+ // were false by default, it would be easy - we could modify `instance.ExecprogCmd` only to pass it
+ // when it's true - which would never be the case in the newer versions (this is how we got rid of
+ // fault injection args). But the collide flag was true by default, so it must be passed by value
+ // (-collide=%v). The least kludgy solution is to silently accept this flag also in the newer versions
+ // of syzkaller, but do not process it, as there's no such functionality anymore.
+ // Note, however, that we do not have to do the same for `syz-prog2c`, as `collide` was there false
+ // by default.
+ flagCollide = flag.Bool("collide", false, "(DEPRECATED) collide syscalls to provoke data races")
)
func main() {
@@ -73,6 +86,9 @@ func main() {
log.Logf(0, "%-24v: %v", feat.Name, feat.Reason)
}
}
+ if *flagCollide {
+ log.Fatalf("setting -collide to true is deprecated now")
+ }
config, execOpts := createConfig(target, features, featuresFlags)
if err = host.Setup(target, features, featuresFlags, config.Executor); err != nil {
log.Fatal(err)
diff --git a/tools/syz-prog2c/prog2c.go b/tools/syz-prog2c/prog2c.go
index aa9c146e1..7cea629f8 100644
--- a/tools/syz-prog2c/prog2c.go
+++ b/tools/syz-prog2c/prog2c.go
@@ -21,7 +21,6 @@ var (
flagArch = flag.String("arch", runtime.GOARCH, "target arch")
flagBuild = flag.Bool("build", false, "also build the generated program")
flagThreaded = flag.Bool("threaded", false, "create threaded program")
- flagCollide = flag.Bool("collide", false, "create collide program")
flagRepeat = flag.Int("repeat", 1, "repeat program that many times (<=0 - infinitely)")
flagProcs = flag.Int("procs", 1, "number of parallel processes")
flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
@@ -72,7 +71,6 @@ func main() {
}
opts := csource.Options{
Threaded: *flagThreaded,
- Collide: *flagCollide,
Repeat: *flagRepeat != 1,
RepeatTimes: *flagRepeat,
Procs: *flagProcs,
diff --git a/tools/syz-reprolist/reprolist.go b/tools/syz-reprolist/reprolist.go
index 0833a8593..a14740d05 100644
--- a/tools/syz-reprolist/reprolist.go
+++ b/tools/syz-reprolist/reprolist.go
@@ -177,9 +177,11 @@ func createProg2CArgs(bug *dashapi.BugReport, opts csource.Options, file string)
"-prog", file,
"-sandbox", opts.Sandbox,
fmt.Sprintf("-segv=%v", opts.HandleSegv),
- fmt.Sprintf("-collide=%v", opts.Collide),
fmt.Sprintf("-threaded=%v", opts.Threaded),
}
+ if opts.Collide {
+ args = append(args, "-collide")
+ }
if haveOSFlag {
args = append(args, "-os", *flagOS)
}