aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2021-09-03 16:20:07 +0000
committerAleksandr Nogikh <wp32pw@gmail.com>2021-09-22 15:40:02 +0200
commit1c202847db0380015a8920bfd21375c2d9f28ddb (patch)
tree6693da3a936398a9ac6678842ac181c5f0e3e429
parenta7ce77be27d8e3728b97122a005bc5b23298cfc3 (diff)
all: refactor fault injection into call props
Now that the call properties mechanism is implemented, we can refactor fault injection. Unfortunately, it is impossible to remove all traces of the previous approach. In reprolist and while performing syz-ci jobs, syzkaller still needs to parse the old format. Remove the old prog options-based approach wherever possible and replace it with the use of call properties.
-rw-r--r--executor/common.h8
-rw-r--r--executor/common_bsd.h2
-rw-r--r--executor/common_linux.h2
-rw-r--r--executor/executor.cc45
-rw-r--r--pkg/csource/common.go2
-rw-r--r--pkg/csource/csource.go4
-rw-r--r--pkg/csource/csource_test.go4
-rw-r--r--pkg/csource/generated.go12
-rw-r--r--pkg/csource/options.go12
-rw-r--r--pkg/csource/options_test.go46
-rw-r--r--pkg/instance/instance.go14
-rw-r--r--pkg/ipc/ipc.go9
-rw-r--r--pkg/repro/repro.go66
-rw-r--r--prog/analysis.go9
-rw-r--r--prog/decodeexec.go3
-rw-r--r--prog/encoding.go6
-rw-r--r--prog/encoding_test.go8
-rw-r--r--prog/encodingexec.go10
-rw-r--r--prog/encodingexec_test.go406
-rw-r--r--prog/minimization.go18
-rw-r--r--prog/minimization_test.go22
-rw-r--r--prog/parse.go29
-rw-r--r--prog/parse_test.go20
-rw-r--r--prog/prog.go13
-rw-r--r--sys/syz-sysgen/sysgen.go4
-rw-r--r--syz-fuzzer/proc.go25
-rw-r--r--tools/syz-execprog/execprog.go21
-rw-r--r--tools/syz-prog2c/prog2c.go5
28 files changed, 384 insertions, 441 deletions
diff --git a/executor/common.h b/executor/common.h
index 17c680b21..3c817a7e8 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -271,13 +271,19 @@ static void __attribute__((noinline)) remove_dir(const char* dir)
#endif
#if !GOOS_linux && !GOOS_netbsd
-#if SYZ_EXECUTOR
+#if SYZ_EXECUTOR || SYZ_FAULT
static int inject_fault(int nth)
{
return 0;
}
#endif
+#if SYZ_FAULT
+static void setup_fault()
+{
+}
+#endif
+
#if SYZ_EXECUTOR
static int fault_injected(int fail_fd)
{
diff --git a/executor/common_bsd.h b/executor/common_bsd.h
index d5030d482..332e68e5f 100644
--- a/executor/common_bsd.h
+++ b/executor/common_bsd.h
@@ -60,7 +60,7 @@ static int inject_fault(int nth)
en.scope = FAULT_SCOPE_LWP;
en.mode = 0; // FAULT_MODE_NTH_ONESHOT
- en.nth = nth + 2; // FAULT_NTH_MIN
+ en.nth = nth + 1; // FAULT_NTH_MIN
if (ioctl(fd, FAULT_IOC_ENABLE, &en) != 0)
failmsg("FAULT_IOC_ENABLE failed", "nth=%d", nth);
diff --git a/executor/common_linux.h b/executor/common_linux.h
index 215ef1a8a..d80883729 100644
--- a/executor/common_linux.h
+++ b/executor/common_linux.h
@@ -4283,7 +4283,7 @@ static int inject_fault(int nth)
if (fd == -1)
exitf("failed to open /proc/thread-self/fail-nth");
char buf[16];
- sprintf(buf, "%d", nth + 1);
+ sprintf(buf, "%d", nth);
if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf))
exitf("failed to write /proc/thread-self/fail-nth");
return fd;
diff --git a/executor/executor.cc b/executor/executor.cc
index 950f37cba..1aea79898 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -151,11 +151,6 @@ static bool flag_coverage_filter;
// If true, then executor should write the comparisons data to fuzzer.
static bool flag_comparisons;
-// Inject fault into flag_fault_nth-th operation in flag_fault_call-th syscall.
-static bool flag_fault;
-static int flag_fault_call;
-static int flag_fault_nth;
-
// Tunable timeouts, received with execute_req.
static uint64 syscall_timeout_ms;
static uint64 program_timeout_ms;
@@ -170,6 +165,7 @@ const int kMaxCommands = 1000; // prog package knows about this constant (prog.e
const uint64 instr_eof = -1;
const uint64 instr_copyin = -2;
const uint64 instr_copyout = -3;
+const uint64 instr_setprops = -4;
const uint64 arg_const = 0;
const uint64 arg_result = 1;
@@ -281,8 +277,6 @@ struct execute_req {
uint64 env_flags;
uint64 exec_flags;
uint64 pid;
- uint64 fault_call;
- uint64 fault_nth;
uint64 syscall_timeout_ms;
uint64 program_timeout_ms;
uint64 slowdown_scale;
@@ -589,21 +583,17 @@ void receive_execute()
slowdown_scale = req.slowdown_scale;
flag_collect_cover = req.exec_flags & (1 << 0);
flag_dedup_cover = req.exec_flags & (1 << 1);
- flag_fault = req.exec_flags & (1 << 2);
- flag_comparisons = req.exec_flags & (1 << 3);
- flag_threaded = req.exec_flags & (1 << 4);
- flag_collide = req.exec_flags & (1 << 5);
- flag_coverage_filter = req.exec_flags & (1 << 6);
- flag_fault_call = req.fault_call;
- flag_fault_nth = req.fault_nth;
+ flag_comparisons = req.exec_flags & (1 << 2);
+ flag_threaded = req.exec_flags & (1 << 3);
+ flag_collide = req.exec_flags & (1 << 4);
+ flag_coverage_filter = req.exec_flags & (1 << 5);
if (!flag_threaded)
flag_collide = false;
- debug("[%llums] exec opts: procid=%llu threaded=%d collide=%d cover=%d comps=%d dedup=%d fault=%d/%d/%d"
+ debug("[%llums] exec opts: procid=%llu threaded=%d collide=%d cover=%d comps=%d dedup=%d"
" timeouts=%llu/%llu/%llu prog=%llu filter=%d\n",
current_time_ms() - start_time_ms, procid, flag_threaded, flag_collide,
- flag_collect_cover, flag_comparisons, flag_dedup_cover, flag_fault,
- flag_fault_call, flag_fault_nth, syscall_timeout_ms, program_timeout_ms, slowdown_scale,
- req.prog_size, flag_coverage_filter);
+ flag_collect_cover, flag_comparisons, flag_dedup_cover, syscall_timeout_ms,
+ program_timeout_ms, slowdown_scale, req.prog_size, flag_coverage_filter);
if (syscall_timeout_ms == 0 || program_timeout_ms <= syscall_timeout_ms || slowdown_scale == 0)
failmsg("bad timeouts", "syscall=%llu, program=%llu, scale=%llu",
syscall_timeout_ms, program_timeout_ms, slowdown_scale);
@@ -674,7 +664,9 @@ retry:
int call_index = 0;
uint64 prog_extra_timeout = 0;
uint64 prog_extra_cover_timeout = 0;
+ bool has_fault_injection = false;
call_props_t call_props;
+ memset(&call_props, 0, sizeof(call_props));
for (;;) {
uint64 call_num = read_input(&input_pos);
@@ -765,11 +757,14 @@ retry:
// The copyout will happen when/if the call completes.
continue;
}
+ if (call_num == instr_setprops) {
+ read_call_props_t(call_props, read_input(&input_pos, false));
+ continue;
+ }
// Normal syscall.
if (call_num >= ARRAY_SIZE(syscalls))
failmsg("invalid syscall number", "call_num=%llu", call_num);
- read_call_props_t(call_props, read_input(&input_pos, false));
const call_t* call = &syscalls[call_num];
if (call->attrs.disabled)
failmsg("executing disabled syscall", "syscall=%s", call->name);
@@ -779,6 +774,7 @@ retry:
prog_extra_cover_timeout = std::max(prog_extra_cover_timeout, 500 * slowdown_scale);
if (strncmp(syscalls[call_num].name, "syz_80211_inject_frame", strlen("syz_80211_inject_frame")) == 0)
prog_extra_cover_timeout = std::max(prog_extra_cover_timeout, 300 * slowdown_scale);
+ has_fault_injection |= (call_props.fail_nth > 0);
uint64 copyout_index = read_input(&input_pos);
uint64 num_args = read_input(&input_pos);
if (num_args > kMaxArgs)
@@ -819,6 +815,7 @@ retry:
event_set(&th->done);
handle_completion(th);
}
+ memset(&call_props, 0, sizeof(call_props));
}
if (!colliding && !collide && running > 0) {
@@ -865,7 +862,7 @@ retry:
}
}
- if (flag_collide && !flag_fault && !colliding && !collide) {
+ if (flag_collide && !colliding && !has_fault_injection && !collide) {
debug("enabling collider\n");
collide = colliding = true;
goto retry;
@@ -1154,10 +1151,10 @@ void execute_call(thread_t* th)
int fail_fd = -1;
th->soft_fail_state = false;
- if (flag_fault && th->call_index == flag_fault_call) {
+ if (th->call_props.fail_nth > 0) {
if (collide)
fail("both collide and fault injection are enabled");
- fail_fd = inject_fault(flag_fault_nth);
+ fail_fd = inject_fault(th->call_props.fail_nth);
th->soft_fail_state = true;
}
@@ -1182,7 +1179,7 @@ void execute_call(thread_t* th)
}
th->fault_injected = false;
- if (flag_fault && th->call_index == flag_fault_call) {
+ if (th->call_props.fail_nth > 0) {
th->fault_injected = fault_injected(fail_fd);
}
@@ -1190,7 +1187,7 @@ void execute_call(thread_t* th)
th->id, current_time_ms() - start_time_ms, call->name, (uint64)th->res, th->reserrno);
if (flag_coverage)
debug("cover=%u ", th->cov.size);
- if (flag_fault && th->call_index == flag_fault_call)
+ if (th->call_props.fail_nth > 0)
debug("fault=%d ", th->fault_injected);
debug("\n");
}
diff --git a/pkg/csource/common.go b/pkg/csource/common.go
index a0d119bed..a1525e24f 100644
--- a/pkg/csource/common.go
+++ b/pkg/csource/common.go
@@ -107,7 +107,7 @@ func commonDefines(p *prog.Prog, opts Options) map[string]bool {
"SYZ_REPEAT": opts.Repeat,
"SYZ_REPEAT_TIMES": opts.RepeatTimes > 1,
"SYZ_MULTI_PROC": opts.Procs > 1,
- "SYZ_FAULT": opts.Fault,
+ "SYZ_FAULT": p.HasFaultInjection(),
"SYZ_LEAK": opts.Leak,
"SYZ_NET_INJECTION": opts.NetInjection,
"SYZ_NET_DEVICES": opts.NetDevices,
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go
index c30db6b9d..5d0d4efd1 100644
--- a/pkg/csource/csource.go
+++ b/pkg/csource/csource.go
@@ -203,8 +203,8 @@ func (ctx *context) generateCalls(p prog.ExecProg, trace bool) ([]string, []uint
ctx.copyin(w, &csumSeq, copyin)
}
- if ctx.opts.Fault && ctx.opts.FaultCall == ci {
- fmt.Fprintf(w, "\tinject_fault(%v);\n", ctx.opts.FaultNth)
+ if call.Props.FailNth > 0 {
+ fmt.Fprintf(w, "\tinject_fault(%v);\n", call.Props.FailNth)
}
// Call itself.
callName := call.Meta.CallName
diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go
index 9e6fd1a59..ebf22d5f9 100644
--- a/pkg/csource/csource_test.go
+++ b/pkg/csource/csource_test.go
@@ -58,6 +58,10 @@ func testTarget(t *testing.T, target *prog.Target, full bool) {
// Testing 2 programs takes too long since we have lots of options permutations and OS/arch.
// So we use the as-is in short tests and minimized version in full tests.
syzProg := target.GenerateAllSyzProg(rs)
+ if len(syzProg.Calls) > 0 {
+ // Test fault injection generation as well.
+ p.Calls[0].Props.FailNth = 1
+ }
var opts []Options
if !full || testing.Short() {
p.Calls = append(p.Calls, syzProg.Calls...)
diff --git a/pkg/csource/generated.go b/pkg/csource/generated.go
index b4880f989..27a7bc132 100644
--- a/pkg/csource/generated.go
+++ b/pkg/csource/generated.go
@@ -238,13 +238,19 @@ static void __attribute__((noinline)) remove_dir(const char* dir)
#endif
#if !GOOS_linux && !GOOS_netbsd
-#if SYZ_EXECUTOR
+#if SYZ_EXECUTOR || SYZ_FAULT
static int inject_fault(int nth)
{
return 0;
}
#endif
+#if SYZ_FAULT
+static void setup_fault()
+{
+}
+#endif
+
#if SYZ_EXECUTOR
static int fault_injected(int fail_fd)
{
@@ -1600,7 +1606,7 @@ static int inject_fault(int nth)
en.scope = FAULT_SCOPE_LWP;
en.mode = 0;
- en.nth = nth + 2;
+ en.nth = nth + 1;
if (ioctl(fd, FAULT_IOC_ENABLE, &en) != 0)
failmsg("FAULT_IOC_ENABLE failed", "nth=%d", nth);
@@ -9348,7 +9354,7 @@ static int inject_fault(int nth)
if (fd == -1)
exitf("failed to open /proc/thread-self/fail-nth");
char buf[16];
- sprintf(buf, "%d", nth + 1);
+ sprintf(buf, "%d", nth);
if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf))
exitf("failed to write /proc/thread-self/fail-nth");
return fd;
diff --git a/pkg/csource/options.go b/pkg/csource/options.go
index a0e1fe7ac..36490c8b8 100644
--- a/pkg/csource/options.go
+++ b/pkg/csource/options.go
@@ -26,10 +26,6 @@ type Options struct {
Slowdown int `json:"slowdown"`
Sandbox string `json:"sandbox"`
- Fault bool `json:"fault,omitempty"` // inject fault into FaultCall/FaultNth
- FaultCall int `json:"fault_call,omitempty"`
- FaultNth int `json:"fault_nth,omitempty"`
-
Leak bool `json:"leak,omitempty"` // do leak checking
// These options allow for a more fine-tuned control over the generated C code.
@@ -54,6 +50,14 @@ type Options struct {
// which allows to detect hangs.
Repro bool `json:"repro,omitempty"`
Trace bool `json:"trace,omitempty"`
+ LegacyOptions
+}
+
+// These are legacy options, they remain only for the sake of backward compatibility.
+type LegacyOptions struct {
+ Fault bool `json:"fault,omitempty"`
+ FaultCall int `json:"fault_call,omitempty"`
+ FaultNth int `json:"fault_nth,omitempty"`
}
// Check checks if the opts combination is valid or not.
diff --git a/pkg/csource/options_test.go b/pkg/csource/options_test.go
index 758e91ea1..ba31e4c95 100644
--- a/pkg/csource/options_test.go
+++ b/pkg/csource/options_test.go
@@ -27,7 +27,7 @@ func TestParseOptions(t *testing.T) {
func TestParseOptionsCanned(t *testing.T) {
// Dashboard stores csource options with syzkaller reproducers,
// so we need to be able to parse old formats.
- // nolint: lll
+ // nolint: lll, dupl
canned := map[string]Options{
`{"threaded":true,"collide":true,"repeat":true,"procs":10,"sandbox":"namespace",
"fault":true,"fault_call":1,"fault_nth":2,"tun":true,"tmpdir":true,"cgroups":true,
@@ -39,9 +39,6 @@ func TestParseOptionsCanned(t *testing.T) {
Procs: 10,
Slowdown: 1,
Sandbox: "namespace",
- Fault: true,
- FaultCall: 1,
- FaultNth: 2,
NetInjection: true,
NetDevices: true,
NetReset: true,
@@ -51,6 +48,11 @@ func TestParseOptionsCanned(t *testing.T) {
UseTmpDir: true,
HandleSegv: true,
Repro: true,
+ LegacyOptions: LegacyOptions{
+ Fault: true,
+ FaultCall: 1,
+ FaultNth: 2,
+ },
},
`{"threaded":true,"collide":true,"repeat":true,"procs":10,"sandbox":"android",
"fault":true,"fault_call":1,"fault_nth":2,"tun":true,"tmpdir":true,"cgroups":true,
@@ -62,9 +64,6 @@ func TestParseOptionsCanned(t *testing.T) {
Procs: 10,
Slowdown: 1,
Sandbox: "android",
- Fault: true,
- FaultCall: 1,
- FaultNth: 2,
NetInjection: true,
NetDevices: true,
NetReset: true,
@@ -74,6 +73,11 @@ func TestParseOptionsCanned(t *testing.T) {
UseTmpDir: true,
HandleSegv: true,
Repro: true,
+ LegacyOptions: LegacyOptions{
+ Fault: true,
+ FaultCall: 1,
+ FaultNth: 2,
+ },
},
"{Threaded:true Collide:true Repeat:true Procs:1 Sandbox:none Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: true,
@@ -82,9 +86,6 @@ func TestParseOptionsCanned(t *testing.T) {
Procs: 1,
Slowdown: 1,
Sandbox: "none",
- Fault: false,
- FaultCall: -1,
- FaultNth: 0,
NetInjection: true,
Cgroups: false,
BinfmtMisc: false,
@@ -92,6 +93,11 @@ func TestParseOptionsCanned(t *testing.T) {
UseTmpDir: true,
HandleSegv: true,
Repro: false,
+ LegacyOptions: LegacyOptions{
+ Fault: false,
+ FaultCall: -1,
+ FaultNth: 0,
+ },
},
"{Threaded:true Collide:true Repeat:true Procs:1 Sandbox: Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: true,
@@ -100,9 +106,6 @@ func TestParseOptionsCanned(t *testing.T) {
Procs: 1,
Slowdown: 1,
Sandbox: "",
- Fault: false,
- FaultCall: -1,
- FaultNth: 0,
NetInjection: true,
Cgroups: false,
BinfmtMisc: false,
@@ -110,6 +113,11 @@ func TestParseOptionsCanned(t *testing.T) {
UseTmpDir: true,
HandleSegv: true,
Repro: false,
+ LegacyOptions: LegacyOptions{
+ Fault: false,
+ FaultCall: -1,
+ FaultNth: 0,
+ },
},
"{Threaded:false Collide:true Repeat:true Procs:1 Sandbox:namespace Fault:false FaultCall:-1 FaultNth:0 EnableTun:true UseTmpDir:true EnableCgroups:true HandleSegv:true WaitRepeat:true Debug:false Repro:false}": {
Threaded: false,
@@ -118,9 +126,6 @@ func TestParseOptionsCanned(t *testing.T) {
Procs: 1,
Slowdown: 1,
Sandbox: "namespace",
- Fault: false,
- FaultCall: -1,
- FaultNth: 0,
NetInjection: true,
Cgroups: true,
BinfmtMisc: false,
@@ -128,6 +133,11 @@ func TestParseOptionsCanned(t *testing.T) {
UseTmpDir: true,
HandleSegv: true,
Repro: false,
+ LegacyOptions: LegacyOptions{
+ Fault: false,
+ FaultCall: -1,
+ FaultNth: 0,
+ },
},
}
for data, want := range canned {
@@ -211,9 +221,7 @@ func enumerateField(OS string, opt Options, field int) []Options {
fld.SetInt(val)
opts = append(opts, opt)
}
- } else if fldName == "FaultCall" {
- opts = append(opts, opt)
- } else if fldName == "FaultNth" {
+ } else if fldName == "LegacyOptions" {
opts = append(opts, opt)
} else if fld.Kind() == reflect.Bool {
for _, v := range []bool{false, true} {
diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go
index 92c6d1353..68920577b 100644
--- a/pkg/instance/instance.go
+++ b/pkg/instance/instance.go
@@ -480,17 +480,23 @@ func ExecprogCmd(execprog, executor, OS, arch, sandbox string, repeat, threaded,
osArg = " -os=" + OS
}
optionalArg := ""
+
+ if faultCall >= 0 {
+ optionalArg = fmt.Sprintf(" -fault_call=%v -fault_nth=%v",
+ faultCall, faultNth)
+ }
+
if optionalFlags {
- optionalArg = " " + tool.OptionalFlags([]tool.Flag{
+ optionalArg += " " + tool.OptionalFlags([]tool.Flag{
{Name: "slowdown", Value: fmt.Sprint(slowdown)},
})
}
+
return fmt.Sprintf("%v -executor=%v -arch=%v%v -sandbox=%v"+
- " -procs=%v -repeat=%v -threaded=%v -collide=%v -cover=0"+
- " -fault_call=%v -fault_nth=%v%v %v",
+ " -procs=%v -repeat=%v -threaded=%v -collide=%v -cover=0%v %v",
execprog, executor, arch, osArg, sandbox,
procs, repeatCount, threaded, collide,
- faultCall, faultNth, optionalArg, progFile)
+ optionalArg, progFile)
}
var MakeBin = func() string {
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
index 643f16582..1d6ba4f40 100644
--- a/pkg/ipc/ipc.go
+++ b/pkg/ipc/ipc.go
@@ -50,7 +50,6 @@ type ExecFlags uint64
const (
FlagCollectCover ExecFlags = 1 << iota // collect coverage
FlagDedupCover // deduplicate coverage in executor
- FlagInjectFault // inject a fault in this execution (see ExecOpts)
FlagCollectComps // collect KCOV comparisons
FlagThreaded // use multiple threads to mitigate blocked syscalls
FlagCollide // collide syscalls to provoke data races
@@ -58,9 +57,7 @@ const (
)
type ExecOpts struct {
- Flags ExecFlags
- FaultCall int // call index for fault injection (0-based)
- FaultNth int // fault n-th operation in the call (0-based)
+ Flags ExecFlags
}
// Config is the configuration for Env.
@@ -509,8 +506,6 @@ type executeReq struct {
envFlags uint64 // env flags
execFlags uint64 // exec flags
pid uint64
- faultCall uint64
- faultNth uint64
syscallTimeoutMS uint64
programTimeoutMS uint64
slowdownScale uint64
@@ -733,8 +728,6 @@ func (c *command) exec(opts *ExecOpts, progData []byte) (output []byte, hanged b
envFlags: uint64(c.config.Flags),
execFlags: uint64(opts.Flags),
pid: uint64(c.pid),
- faultCall: uint64(opts.FaultCall),
- faultNth: uint64(opts.FaultNth),
syscallTimeoutMS: uint64(c.config.Timeouts.Syscall / time.Millisecond),
programTimeoutMS: uint64(c.config.Timeouts.Program / time.Millisecond),
slowdownScale: uint64(c.config.Timeouts.Scale),
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go
index 52f456a83..8eebb5c6d 100644
--- a/pkg/repro/repro.go
+++ b/pkg/repro/repro.go
@@ -351,12 +351,6 @@ func (ctx *context) extractProgSingle(entries []*prog.LogEntry, duration time.Du
opts := ctx.startOpts
for _, ent := range entries {
- opts.Fault = ent.Fault
- opts.FaultCall = ent.FaultCall
- opts.FaultNth = ent.FaultNth
- if opts.FaultCall < 0 || opts.FaultCall >= len(ent.P.Calls) {
- opts.FaultCall = len(ent.P.Calls) - 1
- }
crashed, err := ctx.testProg(ent.P, duration, opts)
if err != nil {
return nil, err
@@ -409,8 +403,6 @@ func (ctx *context) extractProgBisect(entries []*prog.LogEntry, baseDuration tim
prog.Calls = append(prog.Calls, entry.P.Calls...)
}
dur := duration(len(entries)) * 3 / 2
-
- // Execute the program without fault injection.
crashed, err := ctx.testProg(prog, dur, opts)
if err != nil {
return nil, err
@@ -425,32 +417,6 @@ func (ctx *context) extractProgBisect(entries []*prog.LogEntry, baseDuration tim
return res, nil
}
- // Try with fault injection.
- calls := 0
- for _, entry := range entries {
- if entry.Fault {
- opts.FaultCall = calls + entry.FaultCall
- opts.FaultNth = entry.FaultNth
- if entry.FaultCall < 0 || entry.FaultCall >= len(entry.P.Calls) {
- opts.FaultCall = calls + len(entry.P.Calls) - 1
- }
- crashed, err := ctx.testProg(prog, dur, opts)
- if err != nil {
- return nil, err
- }
- if crashed {
- res := &Result{
- Prog: prog,
- Duration: dur,
- Opts: opts,
- }
- ctx.reproLogf(3, "bisect: concatenation succeeded with fault injection")
- return res, nil
- }
- }
- calls += len(entry.P.Calls)
- }
-
ctx.reproLogf(3, "bisect: concatenation failed")
return nil, nil
}
@@ -463,11 +429,7 @@ func (ctx *context) minimizeProg(res *Result) (*Result, error) {
ctx.stats.MinimizeProgTime = time.Since(start)
}()
- call := -1
- if res.Opts.Fault {
- call = res.Opts.FaultCall
- }
- res.Prog, res.Opts.FaultCall = prog.Minimize(res.Prog, call, true,
+ res.Prog, _ = prog.Minimize(res.Prog, -1, true,
func(p1 *prog.Prog, callIndex int) bool {
crashed, err := ctx.testProg(p1, res.Duration, res.Opts)
if err != nil {
@@ -482,12 +444,13 @@ func (ctx *context) minimizeProg(res *Result) (*Result, error) {
// Simplify repro options (threaded, collide, sandbox, etc).
func (ctx *context) simplifyProg(res *Result) (*Result, error) {
- ctx.reproLogf(2, "simplifying guilty program")
+ ctx.reproLogf(2, "simplifying guilty program options")
start := time.Now()
defer func() {
ctx.stats.SimplifyProgTime = time.Since(start)
}()
+ // Do further simplifications.
for _, simplify := range progSimplifies {
opts := res.Opts
if !simplify(&opts) || !checkOpts(&opts, ctx.timeouts, res.Duration) {
@@ -580,11 +543,6 @@ func checkOpts(opts *csource.Options, timeouts targets.Timeouts, timeout time.Du
func (ctx *context) testProg(p *prog.Prog, duration time.Duration, opts csource.Options) (crashed bool, err error) {
entry := prog.LogEntry{P: p}
- if opts.Fault {
- entry.Fault = true
- entry.FaultCall = opts.FaultCall
- entry.FaultNth = opts.FaultNth
- }
return ctx.testProgs([]*prog.LogEntry{&entry}, duration, opts)
}
@@ -610,9 +568,6 @@ func (ctx *context) testProgs(entries []*prog.LogEntry, duration time.Duration,
return false, fmt.Errorf("failed to copy to VM: %v", err)
}
- if !opts.Fault {
- opts.FaultCall = -1
- }
program := entries[0].P.String()
if len(entries) > 1 {
program = "["
@@ -820,11 +775,7 @@ func chunksToStr(chunks [][]*prog.LogEntry) string {
func encodeEntries(entries []*prog.LogEntry) []byte {
buf := new(bytes.Buffer)
for _, ent := range entries {
- opts := ""
- if ent.Fault {
- opts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", ent.FaultCall, ent.FaultNth)
- }
- fmt.Fprintf(buf, "executing program %v%v:\n%v", ent.Proc, opts, string(ent.P.Serialize()))
+ fmt.Fprintf(buf, "executing program %v:\n%v", ent.Proc, string(ent.P.Serialize()))
}
return buf.Bytes()
}
@@ -833,15 +784,6 @@ type Simplify func(opts *csource.Options) bool
var progSimplifies = []Simplify{
func(opts *csource.Options) bool {
- if !opts.Fault {
- return false
- }
- opts.Fault = false
- opts.FaultCall = 0
- opts.FaultNth = 0
- return true
- },
- func(opts *csource.Options) bool {
if !opts.Collide {
return false
}
diff --git a/prog/analysis.go b/prog/analysis.go
index 0ac0a97b9..6643941ff 100644
--- a/prog/analysis.go
+++ b/prog/analysis.go
@@ -176,6 +176,15 @@ func RequiredFeatures(p *Prog) (bitmasks, csums bool) {
return
}
+func (p *Prog) HasFaultInjection() bool {
+ for _, call := range p.Calls {
+ if call.Props.FailNth > 0 {
+ return true
+ }
+ }
+ return false
+}
+
type CallFlags int
const (
diff --git a/prog/decodeexec.go b/prog/decodeexec.go
index ff1ab5727..f803d5b4c 100644
--- a/prog/decodeexec.go
+++ b/prog/decodeexec.go
@@ -115,13 +115,14 @@ func (dec *execDecoder) parse() {
case execInstrEOF:
dec.commitCall()
return
+ case execInstrSetProps:
+ dec.readCallProps(&dec.call.Props)
default:
dec.commitCall()
if instr >= uint64(len(dec.target.Syscalls)) {
dec.setErr(fmt.Errorf("bad syscall %v", instr))
return
}
- dec.readCallProps(&dec.call.Props)
dec.call.Meta = dec.target.Syscalls[instr]
dec.call.Index = dec.read()
for i := dec.read(); i > 0; i-- {
diff --git a/prog/encoding.go b/prog/encoding.go
index 3f1c918a7..9161da6b2 100644
--- a/prog/encoding.go
+++ b/prog/encoding.go
@@ -81,11 +81,9 @@ func (ctx *serializer) call(c *Call) {
}
ctx.printf(")")
- defaultProps := DefaultCallProps()
anyChangedProps := false
c.Props.ForeachProp(func(name, key string, value reflect.Value) {
- defaultValue := reflect.ValueOf(defaultProps).FieldByName(name)
- if reflect.DeepEqual(value.Interface(), defaultValue.Interface()) {
+ if value.IsZero() {
return
}
@@ -352,7 +350,7 @@ func (p *parser) parseProg() (*Prog, error) {
func (p *parser) parseCallProps() CallProps {
nameToValue := map[string]reflect.Value{}
- callProps := DefaultCallProps()
+ callProps := CallProps{}
callProps.ForeachProp(func(_, key string, value reflect.Value) {
nameToValue[key] = value
})
diff --git a/prog/encoding_test.go b/prog/encoding_test.go
index d7171248f..11e71867f 100644
--- a/prog/encoding_test.go
+++ b/prog/encoding_test.go
@@ -327,6 +327,10 @@ func TestDeserialize(t *testing.T) {
Out: `test$opt2(0x0) (fail_nth: 1)`,
StrictErr: `unknown call property: non_existing_prop`,
},
+ {
+ In: `test$opt2(0x0) (fail_nth: 0)`,
+ Out: `test$opt2(0x0)`,
+ },
})
}
@@ -409,11 +413,11 @@ func TestSerializeCallProps(t *testing.T) {
tests := []SerializeCallPropsTest{
{
"serialize0(0x0)\n",
- []CallProps{DefaultCallProps()},
+ []CallProps{{}},
},
{
"serialize0(0x0) ()\n",
- []CallProps{DefaultCallProps()},
+ []CallProps{{}},
},
{
"serialize0(0x0) (fail_nth: 5)\n",
diff --git a/prog/encodingexec.go b/prog/encodingexec.go
index 296fcf0cf..fea114717 100644
--- a/prog/encodingexec.go
+++ b/prog/encodingexec.go
@@ -6,7 +6,7 @@
// Exec format is an sequence of uint64's which encodes a sequence of calls.
// The sequence is terminated by a speciall call execInstrEOF.
-// Each call is (call ID, call props, copyout index, number of arguments, arguments...).
+// Each call is (call ID, copyout index, number of arguments, arguments...).
// Each argument is (type, size, value).
// There are 4 types of arguments:
// - execArgConst: value is const value
@@ -30,6 +30,7 @@ const (
execInstrEOF = ^uint64(iota)
execInstrCopyin
execInstrCopyout
+ execInstrSetProps
)
const (
@@ -88,10 +89,12 @@ func (w *execContext) serializeCall(c *Call) {
// Generate checksum calculation instructions starting from the last one,
// since checksum values can depend on values of the latter ones
w.writeChecksums()
+ if !reflect.DeepEqual(c.Props, CallProps{}) {
+ // Push call properties.
+ w.writeCallProps(c.Props)
+ }
// Generate the call itself.
w.write(uint64(c.Meta.ID))
- // Generate call properties fragment.
- w.writeCallProps(c.Props)
if c.Ret != nil && len(c.Ret.uses) != 0 {
if _, ok := w.args[c.Ret]; ok {
panic("argInfo is already created for return value")
@@ -129,6 +132,7 @@ type argInfo struct {
}
func (w *execContext) writeCallProps(props CallProps) {
+ w.write(execInstrSetProps)
props.ForeachProp(func(_, _ string, value reflect.Value) {
switch kind := value.Kind(); kind {
case reflect.Int:
diff --git a/prog/encodingexec_test.go b/prog/encodingexec_test.go
index a7085be0c..4a8e5c7fd 100644
--- a/prog/encodingexec_test.go
+++ b/prog/encodingexec_test.go
@@ -54,27 +54,6 @@ func TestSerializeForExec(t *testing.T) {
buf[7] = byte(v >> 56)
return HostEndian.Uint64(buf)
}
-
- join := func(objects ...interface{}) []uint64 {
- ret := []uint64{}
- for _, val := range objects {
- switch v := val.(type) {
- case uint64:
- ret = append(ret, v)
- case int:
- ret = append(ret, uint64(v))
- case []uint64:
- ret = append(ret, v...)
- default:
- panic(fmt.Sprintf("unsupported object type %T", v))
- }
- }
- return ret
- }
-
- defaultCallPropsSlice := []uint64{
- 0xFFFFFFFFFFFFFFFF,
- }
tests := []struct {
prog string
serialized []uint64
@@ -82,244 +61,215 @@ func TestSerializeForExec(t *testing.T) {
}{
{
"test()",
- join(
- callID("test"), defaultCallPropsSlice,
- ExecNoCopyout, 0,
+ []uint64{
+ callID("test"), ExecNoCopyout, 0,
execInstrEOF,
- ),
+ },
&ExecProg{
Calls: []ExecCall{
{
Meta: target.SyscallMap["test"],
Index: ExecNoCopyout,
- Props: DefaultCallProps(),
},
},
},
},
{
"test$int(0x1, 0x2, 0x3, 0x4, 0x5)",
- join(
- callID("test$int"), defaultCallPropsSlice,
- ExecNoCopyout, 5,
+ []uint64{
+ callID("test$int"), ExecNoCopyout, 5,
execArgConst, 8, 1,
execArgConst, 1, 2,
execArgConst, 2, 3,
execArgConst, 4, 4,
execArgConst, 8, 5,
execInstrEOF,
- ),
- nil,
- },
- {
- "test() (fail_nth: 3)",
- join(
- callID("test"),
- 3,
- ExecNoCopyout, 0,
- execInstrEOF,
- ),
+ },
nil,
},
{
"test$align0(&(0x7f0000000000)={0x1, 0x2, 0x3, 0x4, 0x5})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2, 1,
- execInstrCopyin, dataOffset+4, execArgConst, 4, 2,
- execInstrCopyin, dataOffset+8, execArgConst, 1, 3,
- execInstrCopyin, dataOffset+10, execArgConst, 2, 4,
- execInstrCopyin, dataOffset+16, execArgConst, 8, 5,
- callID("test$align0"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2, 1,
+ execInstrCopyin, dataOffset + 4, execArgConst, 4, 2,
+ execInstrCopyin, dataOffset + 8, execArgConst, 1, 3,
+ execInstrCopyin, dataOffset + 10, execArgConst, 2, 4,
+ execInstrCopyin, dataOffset + 16, execArgConst, 8, 5,
+ callID("test$align0"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align1(&(0x7f0000000000)={0x1, 0x2, 0x3, 0x4, 0x5})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2, 1,
- execInstrCopyin, dataOffset+2, execArgConst, 4, 2,
- execInstrCopyin, dataOffset+6, execArgConst, 1, 3,
- execInstrCopyin, dataOffset+7, execArgConst, 2, 4,
- execInstrCopyin, dataOffset+9, execArgConst, 8, 5,
- callID("test$align1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2, 1,
+ execInstrCopyin, dataOffset + 2, execArgConst, 4, 2,
+ execInstrCopyin, dataOffset + 6, execArgConst, 1, 3,
+ execInstrCopyin, dataOffset + 7, execArgConst, 2, 4,
+ execInstrCopyin, dataOffset + 9, execArgConst, 8, 5,
+ callID("test$align1"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align2(&(0x7f0000000000)={0x42, {[0x43]}, {[0x44]}})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+1, execArgConst, 2, 0x43,
- execInstrCopyin, dataOffset+4, execArgConst, 2, 0x44,
- callID("test$align2"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 1, execArgConst, 2, 0x43,
+ execInstrCopyin, dataOffset + 4, execArgConst, 2, 0x44,
+ callID("test$align2"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align3(&(0x7f0000000000)={0x42, {0x43}, {0x44}})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+1, execArgConst, 1, 0x43,
- execInstrCopyin, dataOffset+4, execArgConst, 1, 0x44,
- callID("test$align3"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 1, execArgConst, 1, 0x43,
+ execInstrCopyin, dataOffset + 4, execArgConst, 1, 0x44,
+ callID("test$align3"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align4(&(0x7f0000000000)={{0x42, 0x43}, 0x44})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+1, execArgConst, 2, 0x43,
- execInstrCopyin, dataOffset+4, execArgConst, 1, 0x44,
- callID("test$align4"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 1, execArgConst, 2, 0x43,
+ execInstrCopyin, dataOffset + 4, execArgConst, 1, 0x44,
+ callID("test$align4"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align5(&(0x7f0000000000)={{0x42, []}, {0x43, [0x44, 0x45, 0x46]}, 0x47})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 8, 0x42,
- execInstrCopyin, dataOffset+8, execArgConst, 8, 0x43,
- execInstrCopyin, dataOffset+16, execArgConst, 2, 0x44,
- execInstrCopyin, dataOffset+18, execArgConst, 2, 0x45,
- execInstrCopyin, dataOffset+20, execArgConst, 2, 0x46,
- execInstrCopyin, dataOffset+22, execArgConst, 1, 0x47,
- callID("test$align5"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 8, 0x42,
+ execInstrCopyin, dataOffset + 8, execArgConst, 8, 0x43,
+ execInstrCopyin, dataOffset + 16, execArgConst, 2, 0x44,
+ execInstrCopyin, dataOffset + 18, execArgConst, 2, 0x45,
+ execInstrCopyin, dataOffset + 20, execArgConst, 2, 0x46,
+ execInstrCopyin, dataOffset + 22, execArgConst, 1, 0x47,
+ callID("test$align5"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align6(&(0x7f0000000000)={0x42, [0x43]})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+4, execArgConst, 4, 0x43,
- callID("test$align6"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 4, execArgConst, 4, 0x43,
+ callID("test$align6"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$union0(&(0x7f0000000000)={0x1, @f2=0x2})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 8, 1,
- execInstrCopyin, dataOffset+8, execArgConst, 1, 2,
- callID("test$union0"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 8, 1,
+ execInstrCopyin, dataOffset + 8, execArgConst, 1, 2,
+ callID("test$union0"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$union1(&(0x7f0000000000)={@f1=0x42, 0x43})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 4, 0x42,
- execInstrCopyin, dataOffset+8, execArgConst, 1, 0x43,
- callID("test$union1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 4, 0x42,
+ execInstrCopyin, dataOffset + 8, execArgConst, 1, 0x43,
+ callID("test$union1"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$union2(&(0x7f0000000000)={@f1=0x42, 0x43})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 4, 0x42,
- execInstrCopyin, dataOffset+4, execArgConst, 1, 0x43,
- callID("test$union2"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 4, 0x42,
+ execInstrCopyin, dataOffset + 4, execArgConst, 1, 0x43,
+ callID("test$union2"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$array0(&(0x7f0000000000)={0x1, [@f0=0x2, @f1=0x3], 0x4})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 1,
- execInstrCopyin, dataOffset+1, execArgConst, 2, 2,
- execInstrCopyin, dataOffset+3, execArgConst, 8, 3,
- execInstrCopyin, dataOffset+11, execArgConst, 8, 4,
- callID("test$array0"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 1,
+ execInstrCopyin, dataOffset + 1, execArgConst, 2, 2,
+ execInstrCopyin, dataOffset + 3, execArgConst, 8, 3,
+ execInstrCopyin, dataOffset + 11, execArgConst, 8, 4,
+ callID("test$array0"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$array1(&(0x7f0000000000)={0x42, \"0102030405\"})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+1, execArgData, 5, letoh64(0x0504030201),
- callID("test$array1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 1, execArgData, 5, letoh64(0x0504030201),
+ callID("test$array1"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$array2(&(0x7f0000000000)={0x42, \"aaaaaaaabbbbbbbbccccccccdddddddd\", 0x43})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2, 0x42,
- execInstrCopyin, dataOffset+2, execArgData, 16, letoh64(0xbbbbbbbbaaaaaaaa), letoh64(0xddddddddcccccccc),
- execInstrCopyin, dataOffset+18, execArgConst, 2, 0x43,
- callID("test$array2"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2, 0x42,
+ execInstrCopyin, dataOffset + 2, execArgData, 16, letoh64(0xbbbbbbbbaaaaaaaa), letoh64(0xddddddddcccccccc),
+ execInstrCopyin, dataOffset + 18, execArgConst, 2, 0x43,
+ callID("test$array2"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$end0(&(0x7f0000000000)={0x42, 0x42, 0x42, 0x42})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1, 0x42,
- execInstrCopyin, dataOffset+1, execArgConst, 2|1<<8, 0x42,
- execInstrCopyin, dataOffset+3, execArgConst, 4|1<<8, 0x42,
- execInstrCopyin, dataOffset+7, execArgConst, 8|1<<8, 0x42,
- callID("test$end0"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1, 0x42,
+ execInstrCopyin, dataOffset + 1, execArgConst, 2 | 1<<8, 0x42,
+ execInstrCopyin, dataOffset + 3, execArgConst, 4 | 1<<8, 0x42,
+ execInstrCopyin, dataOffset + 7, execArgConst, 8 | 1<<8, 0x42,
+ callID("test$end0"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$end1(&(0x7f0000000000)={0xe, 0x42, 0x1})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2|1<<8, 0xe,
- execInstrCopyin, dataOffset+2, execArgConst, 4|1<<8, 0x42,
- execInstrCopyin, dataOffset+6, execArgConst, 8|1<<8, 0x1,
- callID("test$end1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2 | 1<<8, 0xe,
+ execInstrCopyin, dataOffset + 2, execArgConst, 4 | 1<<8, 0x42,
+ execInstrCopyin, dataOffset + 6, execArgConst, 8 | 1<<8, 0x1,
+ callID("test$end1"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$bf0(&(0x7f0000000000)={0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2|0<<16|10<<24, 0x42,
- execInstrCopyin, dataOffset+8, execArgConst, 8, 0x42,
- execInstrCopyin, dataOffset+16, execArgConst, 2|0<<16|5<<24, 0x42,
- execInstrCopyin, dataOffset+16, execArgConst, 2|5<<16|6<<24, 0x42,
- execInstrCopyin, dataOffset+16, execArgConst, 4|11<<16|15<<24, 0x42,
- execInstrCopyin, dataOffset+20, execArgConst, 2|0<<16|11<<24, 0x42,
- execInstrCopyin, dataOffset+22, execArgConst, 2|1<<8|0<<16|11<<24, 0x42,
- execInstrCopyin, dataOffset+24, execArgConst, 1, 0x42,
- callID("test$bf0"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2 | 0<<16 | 10<<24, 0x42,
+ execInstrCopyin, dataOffset + 8, execArgConst, 8, 0x42,
+ execInstrCopyin, dataOffset + 16, execArgConst, 2 | 0<<16 | 5<<24, 0x42,
+ execInstrCopyin, dataOffset + 16, execArgConst, 2 | 5<<16 | 6<<24, 0x42,
+ execInstrCopyin, dataOffset + 16, execArgConst, 4 | 11<<16 | 15<<24, 0x42,
+ execInstrCopyin, dataOffset + 20, execArgConst, 2 | 0<<16 | 11<<24, 0x42,
+ execInstrCopyin, dataOffset + 22, execArgConst, 2 | 1<<8 | 0<<16 | 11<<24, 0x42,
+ execInstrCopyin, dataOffset + 24, execArgConst, 1, 0x42,
+ callID("test$bf0"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
&ExecProg{
Calls: []ExecCall{
{
@@ -331,7 +281,6 @@ func TestSerializeForExec(t *testing.T) {
Value: dataOffset,
},
},
- Props: DefaultCallProps(),
Copyin: []ExecCopyin{
{
Addr: dataOffset + 0,
@@ -409,120 +358,127 @@ func TestSerializeForExec(t *testing.T) {
},
{
"test$bf1(&(0x7f0000000000)={{0x42, 0x42, 0x42}, 0x42})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 4|0<<16|10<<24, 0x42,
- execInstrCopyin, dataOffset+0, execArgConst, 4|10<<16|10<<24, 0x42,
- execInstrCopyin, dataOffset+0, execArgConst, 4|20<<16|10<<24, 0x42,
- execInstrCopyin, dataOffset+4, execArgConst, 1, 0x42,
- callID("test$bf1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 4 | 0<<16 | 10<<24, 0x42,
+ execInstrCopyin, dataOffset + 0, execArgConst, 4 | 10<<16 | 10<<24, 0x42,
+ execInstrCopyin, dataOffset + 0, execArgConst, 4 | 20<<16 | 10<<24, 0x42,
+ execInstrCopyin, dataOffset + 4, execArgConst, 1, 0x42,
+ callID("test$bf1"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$res1(0xffff)",
- join(
- callID("test$res1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, 4, 0xffff,
+ []uint64{
+ callID("test$res1"), ExecNoCopyout, 1, execArgConst, 4, 0xffff,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$opt3(0x0)",
- join(
- callID("test$opt3"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, 8|4<<32, 0x64,
+ []uint64{
+ callID("test$opt3"), ExecNoCopyout, 1, execArgConst, 8 | 4<<32, 0x64,
execInstrEOF,
- ),
+ },
nil,
},
{
// Special value that translates to 0 for all procs.
"test$opt3(0xffffffffffffffff)",
- join(
- callID("test$opt3"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, 8, 0,
+ []uint64{
+ callID("test$opt3"), ExecNoCopyout, 1, execArgConst, 8, 0,
execInstrEOF,
- ),
+ },
nil,
},
{
// NULL pointer must be encoded os 0.
"test$opt1(0x0)",
- join(
- callID("test$opt1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, 8, 0,
+ []uint64{
+ callID("test$opt1"), ExecNoCopyout, 1, execArgConst, 8, 0,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$align7(&(0x7f0000000000)={{0x1, 0x2, 0x3, 0x4, 0x5, 0x6}, 0x42})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 1|0<<16|1<<24, 0x1,
- execInstrCopyin, dataOffset+0, execArgConst, 1|1<<16|1<<24, 0x2,
- execInstrCopyin, dataOffset+0, execArgConst, 1|2<<16|1<<24, 0x3,
- execInstrCopyin, dataOffset+0, execArgConst, 2|3<<16|1<<24, 0x4,
- execInstrCopyin, dataOffset+0, execArgConst, 2|4<<16|1<<24, 0x5,
- execInstrCopyin, dataOffset+0, execArgConst, 2|5<<16|1<<24, 0x6,
- execInstrCopyin, dataOffset+8, execArgConst, 1, 0x42,
- callID("test$align7"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 1 | 0<<16 | 1<<24, 0x1,
+ execInstrCopyin, dataOffset + 0, execArgConst, 1 | 1<<16 | 1<<24, 0x2,
+ execInstrCopyin, dataOffset + 0, execArgConst, 1 | 2<<16 | 1<<24, 0x3,
+ execInstrCopyin, dataOffset + 0, execArgConst, 2 | 3<<16 | 1<<24, 0x4,
+ execInstrCopyin, dataOffset + 0, execArgConst, 2 | 4<<16 | 1<<24, 0x5,
+ execInstrCopyin, dataOffset + 0, execArgConst, 2 | 5<<16 | 1<<24, 0x6,
+ execInstrCopyin, dataOffset + 8, execArgConst, 1, 0x42,
+ callID("test$align7"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$excessive_fields1(0x0)",
- join(
- callID("test$excessive_fields1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, 0x0,
+ []uint64{
+ callID("test$excessive_fields1"), ExecNoCopyout, 1, execArgConst, ptrSize, 0x0,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$excessive_fields1(0xffffffffffffffff)",
- join(
- callID("test$excessive_fields1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, uint64(0xffffffffffffffff),
+ []uint64{
+ callID("test$excessive_fields1"), ExecNoCopyout, 1, execArgConst, ptrSize, 0xffffffffffffffff,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$excessive_fields1(0xfffffffffffffffe)",
- join(
- callID("test$excessive_fields1"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, uint64(0x9999999999999999),
+ []uint64{
+ callID("test$excessive_fields1"), ExecNoCopyout, 1, execArgConst, ptrSize, 0x9999999999999999,
execInstrEOF,
- ),
+ },
nil,
},
{
"test$csum_ipv4_tcp(&(0x7f0000000000)={{0x0, 0x1, 0x2}, {{0x0}, \"ab\"}})",
- join(
- execInstrCopyin, dataOffset+0, execArgConst, 2, 0x0,
- execInstrCopyin, dataOffset+2, execArgConst, 4|1<<8, 0x1,
- execInstrCopyin, dataOffset+6, execArgConst, 4|1<<8, 0x2,
- execInstrCopyin, dataOffset+10, execArgConst, 2, 0x0,
- execInstrCopyin, dataOffset+12, execArgData, 1, letoh64(0xab),
- execInstrCopyin, dataOffset+10, execArgCsum, 2, ExecArgCsumInet, 5,
- ExecArgCsumChunkData, dataOffset+2, 4,
- ExecArgCsumChunkData, dataOffset+6, 4,
+ []uint64{
+ execInstrCopyin, dataOffset + 0, execArgConst, 2, 0x0,
+ execInstrCopyin, dataOffset + 2, execArgConst, 4 | 1<<8, 0x1,
+ execInstrCopyin, dataOffset + 6, execArgConst, 4 | 1<<8, 0x2,
+ execInstrCopyin, dataOffset + 10, execArgConst, 2, 0x0,
+ execInstrCopyin, dataOffset + 12, execArgData, 1, letoh64(0xab),
+ execInstrCopyin, dataOffset + 10, execArgCsum, 2, ExecArgCsumInet, 5,
+ ExecArgCsumChunkData, dataOffset + 2, 4,
+ ExecArgCsumChunkData, dataOffset + 6, 4,
ExecArgCsumChunkConst, 0x0600, 2,
ExecArgCsumChunkConst, 0x0300, 2,
- ExecArgCsumChunkData, dataOffset+10, 3,
- execInstrCopyin, dataOffset+0, execArgCsum, 2, ExecArgCsumInet, 1,
- ExecArgCsumChunkData, dataOffset+0, 10,
- callID("test$csum_ipv4_tcp"), defaultCallPropsSlice,
- ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
+ ExecArgCsumChunkData, dataOffset + 10, 3,
+ execInstrCopyin, dataOffset + 0, execArgCsum, 2, ExecArgCsumInet, 1,
+ ExecArgCsumChunkData, dataOffset + 0, 10,
+ callID("test$csum_ipv4_tcp"), ExecNoCopyout, 1, execArgConst, ptrSize, dataOffset,
execInstrEOF,
- ),
+ },
nil,
},
+ {
+ "test() (fail_nth: 3)",
+ []uint64{
+ execInstrSetProps, 3,
+ callID("test"), ExecNoCopyout, 0,
+ execInstrEOF,
+ },
+ &ExecProg{
+ Calls: []ExecCall{
+ {
+ Meta: target.SyscallMap["test"],
+ Index: ExecNoCopyout,
+ Props: CallProps{3},
+ },
+ },
+ },
+ },
}
buf := make([]byte, ExecBufferSize)
@@ -591,7 +547,7 @@ func TestSerializeForExecOverflow(t *testing.T) {
overflow: false,
gen: func(w *bytes.Buffer) {
fmt.Fprintf(w, "r0 = test$res0()\n")
- for i := 0; i < 42e3; i++ {
+ for i := 0; i < 58e3; i++ {
fmt.Fprintf(w, "test$res1(r0)\n")
}
},
diff --git a/prog/minimization.go b/prog/minimization.go
index 5e0383bb8..71320d92e 100644
--- a/prog/minimization.go
+++ b/prog/minimization.go
@@ -28,7 +28,7 @@ func Minimize(p0 *Prog, callIndex0 int, crash bool, pred0 func(*Prog, int) bool)
// Try to remove all calls except the last one one-by-one.
p0, callIndex0 = removeCalls(p0, callIndex0, crash, pred)
- // Try to minimize individual args.
+ // Try to minimize individual calls.
for i := 0; i < len(p0.Calls); i++ {
ctx := &minimizeArgsCtx{
target: p0.Target,
@@ -46,6 +46,7 @@ func Minimize(p0 *Prog, callIndex0 int, crash bool, pred0 func(*Prog, int) bool)
goto again
}
}
+ p0 = minimizeCallProps(p0, i, callIndex0, pred)
}
if callIndex0 != -1 {
@@ -77,6 +78,21 @@ func removeCalls(p0 *Prog, callIndex0 int, crash bool, pred func(*Prog, int) boo
return p0, callIndex0
}
+func minimizeCallProps(p0 *Prog, callIndex, callIndex0 int, pred func(*Prog, int) bool) *Prog {
+ props := p0.Calls[callIndex].Props
+
+ // Try to drop fault injection.
+ if props.FailNth > 0 {
+ p := p0.Clone()
+ p.Calls[callIndex].Props.FailNth = 0
+ if pred(p, callIndex0) {
+ p0 = p
+ }
+ }
+
+ return p0
+}
+
type minimizeArgsCtx struct {
target *Target
p0 **Prog
diff --git a/prog/minimization_test.go b/prog/minimization_test.go
index 36b65763d..032b2b080 100644
--- a/prog/minimization_test.go
+++ b/prog/minimization_test.go
@@ -149,6 +149,28 @@ func TestMinimize(t *testing.T) {
"minimize$0(0x1, 0xffffffffffffffff)\n",
-1,
},
+ // Clear unneeded fault injection.
+ {
+ "linux", "amd64",
+ "pipe2(0x0, 0x0) (fail_nth: 5)\n",
+ -1,
+ func(p *Prog, callIndex int) bool {
+ return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2"
+ },
+ "pipe2(0x0, 0x0)\n",
+ -1,
+ },
+ // Keep important fault injection.
+ {
+ "linux", "amd64",
+ "pipe2(0x0, 0x0) (fail_nth: 5)\n",
+ -1,
+ func(p *Prog, callIndex int) bool {
+ return len(p.Calls) == 1 && p.Calls[0].Meta.Name == "pipe2" && p.Calls[0].Props.FailNth == 5
+ },
+ "pipe2(0x0, 0x0) (fail_nth: 5)\n",
+ -1,
+ },
}
t.Parallel()
for ti, test := range tests {
diff --git a/prog/parse.go b/prog/parse.go
index 7a46322df..77812f5b4 100644
--- a/prog/parse.go
+++ b/prog/parse.go
@@ -10,19 +10,17 @@ import (
// LogEntry describes one program in execution log.
type LogEntry struct {
- P *Prog
- Proc int // index of parallel proc
- Start int // start offset in log
- End int // end offset in log
- Fault bool // program was executed with fault injection in FaultCall/FaultNth
- FaultCall int
- FaultNth int
+ P *Prog
+ Proc int // index of parallel proc
+ Start int // start offset in log
+ End int // end offset in log
}
func (target *Target) ParseLog(data []byte) []*LogEntry {
var entries []*LogEntry
ent := &LogEntry{}
var cur []byte
+ faultCall, faultNth := -1, -1
for pos := 0; pos < len(data); {
nl := bytes.IndexByte(data[pos:], '\n')
if nl == -1 {
@@ -38,15 +36,17 @@ func (target *Target) ParseLog(data []byte) []*LogEntry {
if ent.P != nil && len(ent.P.Calls) != 0 {
ent.End = pos0
entries = append(entries, ent)
+ faultCall, faultNth = -1, -1
}
ent = &LogEntry{
Proc: proc,
Start: pos0,
}
- if faultCall, ok := extractInt(line, "fault-call:"); ok {
- ent.Fault = true
- ent.FaultCall = faultCall
- ent.FaultNth, _ = extractInt(line, "fault-nth:")
+ // We no longer print it this way, but we still parse such fragments to preserve
+	// backward compatibility.
+ if parsedFaultCall, ok := extractInt(line, "fault-call:"); ok {
+ faultCall = parsedFaultCall
+ faultNth, _ = extractInt(line, "fault-nth:")
}
cur = nil
continue
@@ -55,10 +55,17 @@ func (target *Target) ParseLog(data []byte) []*LogEntry {
continue
}
tmp := append(cur, line...)
+
p, err := target.Deserialize(tmp, NonStrict)
if err != nil {
continue
}
+
+ if faultCall >= 0 && faultCall < len(p.Calls) {
+ // We add 1 because now the property is 1-based.
+ p.Calls[faultCall].Props.FailNth = faultNth + 1
+ }
+
cur = tmp
ent.P = p
}
diff --git a/prog/parse_test.go b/prog/parse_test.go
index 48e3cd203..8de2e36da 100644
--- a/prog/parse_test.go
+++ b/prog/parse_test.go
@@ -30,7 +30,7 @@ gettid()
if ent.Proc != 0 {
t.Fatalf("proc %v, want 0", ent.Proc)
}
- if ent.Fault || ent.FaultCall != 0 || ent.FaultNth != 0 {
+ if ent.P.HasFaultInjection() {
t.Fatalf("fault injection enabled")
}
want := "getpid-gettid"
@@ -67,7 +67,7 @@ func TestParseMulti(t *testing.T) {
t.Fatalf("bad procs")
}
for i, ent := range entries {
- if ent.Fault || ent.FaultCall != 0 || ent.FaultNth != 0 {
+ if ent.P.HasFaultInjection() {
t.Fatalf("prog %v has fault injection enabled", i)
}
}
@@ -123,14 +123,14 @@ getpid()
t.Fatalf("got %v programs, want 1", len(entries))
}
ent := entries[0]
- if !ent.Fault {
- t.Fatalf("fault injection is not enabled")
- }
- if ent.FaultCall != 1 {
- t.Fatalf("fault call: got %v, want 1", ent.FaultCall)
- }
- if ent.FaultNth != 55 {
- t.Fatalf("fault nth: got %v, want 55", ent.FaultNth)
+ faultCall := ent.P.Calls[1]
+ normalCall := ent.P.Calls[0]
+ if faultCall.Props.FailNth != 56 {
+ // We want 56 (not 55!) because the number is now not 0-based.
+ t.Fatalf("fault nth on the 2nd call: got %v, want 56", faultCall.Props.FailNth)
+ }
+ if normalCall.Props.FailNth != 0 {
+ t.Fatalf("fault nth on the 1st call: got %v, want 0", normalCall.Props.FailNth)
}
want := "gettid-getpid"
got := ent.P.String()
diff --git a/prog/prog.go b/prog/prog.go
index 6ead4d47e..068198ffe 100644
--- a/prog/prog.go
+++ b/prog/prog.go
@@ -22,12 +22,6 @@ type CallProps struct {
FailNth int `key:"fail_nth"`
}
-func DefaultCallProps() CallProps {
- return CallProps{
- FailNth: -1,
- }
-}
-
type Call struct {
Meta *Syscall
Args []Arg
@@ -38,10 +32,9 @@ type Call struct {
func MakeCall(meta *Syscall, args []Arg) *Call {
return &Call{
- Meta: meta,
- Args: args,
- Ret: MakeReturnArg(meta.Ret),
- Props: DefaultCallProps(),
+ Meta: meta,
+ Args: args,
+ Ret: MakeReturnArg(meta.Ret),
}
}
diff --git a/sys/syz-sysgen/sysgen.go b/sys/syz-sysgen/sysgen.go
index fbff765cc..44e788fcf 100644
--- a/sys/syz-sysgen/sysgen.go
+++ b/sys/syz-sysgen/sysgen.go
@@ -187,8 +187,8 @@ func main() {
data.CallAttrs = append(data.CallAttrs, prog.CppName(attrs.Field(i).Name))
}
- defaultProps := prog.DefaultCallProps()
- defaultProps.ForeachProp(func(name, _ string, value reflect.Value) {
+ props := prog.CallProps{}
+ props.ForeachProp(func(name, _ string, value reflect.Value) {
data.CallProps = append(data.CallProps, CallPropDescription{
Type: value.Kind().String(),
Name: prog.CppName(name),
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
index 1b05c7f9b..3f9b265e0 100644
--- a/syz-fuzzer/proc.go
+++ b/syz-fuzzer/proc.go
@@ -217,13 +217,11 @@ func (proc *Proc) smashInput(item *WorkSmash) {
}
func (proc *Proc) failCall(p *prog.Prog, call int) {
- for nth := 0; nth < 100; nth++ {
+ for nth := 1; nth <= 100; nth++ {
log.Logf(1, "#%v: injecting fault into call %v/%v", proc.pid, call, nth)
- opts := *proc.execOpts
- opts.Flags |= ipc.FlagInjectFault
- opts.FaultCall = call
- opts.FaultNth = nth
- info := proc.executeRaw(&opts, p, StatSmash)
+ newProg := p.Clone()
+ newProg.Calls[call].Props.FailNth = nth
+ info := proc.executeRaw(proc.execOpts, newProg, StatSmash)
if info != nil && len(info.Calls) > call && info.Calls[call].Flags&ipc.CallFaultInjected == 0 {
break
}
@@ -316,10 +314,6 @@ func (proc *Proc) logProgram(opts *ipc.ExecOpts, p *prog.Prog) {
}
data := p.Serialize()
- strOpts := ""
- if opts.Flags&ipc.FlagInjectFault != 0 {
- strOpts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", opts.FaultCall, opts.FaultNth)
- }
// The following output helps to understand what program crashed kernel.
// It must not be intermixed.
@@ -327,25 +321,22 @@ func (proc *Proc) logProgram(opts *ipc.ExecOpts, p *prog.Prog) {
case OutputStdout:
now := time.Now()
proc.fuzzer.logMu.Lock()
- fmt.Printf("%02v:%02v:%02v executing program %v%v:\n%s\n",
+ fmt.Printf("%02v:%02v:%02v executing program %v:\n%s\n",
now.Hour(), now.Minute(), now.Second(),
- proc.pid, strOpts, data)
+ proc.pid, data)
proc.fuzzer.logMu.Unlock()
case OutputDmesg:
fd, err := syscall.Open("/dev/kmsg", syscall.O_WRONLY, 0)
if err == nil {
buf := new(bytes.Buffer)
- fmt.Fprintf(buf, "syzkaller: executing program %v%v:\n%s\n",
- proc.pid, strOpts, data)
+ fmt.Fprintf(buf, "syzkaller: executing program %v:\n%s\n",
+ proc.pid, data)
syscall.Write(fd, buf.Bytes())
syscall.Close(fd)
}
case OutputFile:
f, err := os.Create(fmt.Sprintf("%v-%v.prog", proc.fuzzer.name, proc.pid))
if err == nil {
- if strOpts != "" {
- fmt.Fprintf(f, "#%v\n", strOpts)
- }
f.Write(data)
f.Close()
}
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index dc6b1a22c..62cc61d4f 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -35,8 +35,6 @@ var (
flagProcs = flag.Int("procs", 1, "number of parallel processes to execute programs")
flagOutput = flag.Bool("output", false, "write programs and results to stdout")
flagHints = flag.Bool("hints", false, "do a hints-generation run")
- flagFaultCall = flag.Int("fault_call", -1, "inject fault into this call (0-based)")
- flagFaultNth = flag.Int("fault_nth", 0, "inject fault on n-th operation (0-based)")
flagEnable = flag.String("enable", "none", "enable only listed additional features")
flagDisable = flag.String("disable", "none", "enable all additional features except listed")
)
@@ -149,13 +147,6 @@ func (ctx *Context) execute(pid int, env *ipc.Env, entry *prog.LogEntry) {
defer ctx.gate.Leave(ticket)
callOpts := ctx.execOpts
- if *flagFaultCall == -1 && entry.Fault {
- newOpts := *ctx.execOpts
- newOpts.Flags |= ipc.FlagInjectFault
- newOpts.FaultCall = entry.FaultCall
- newOpts.FaultNth = entry.FaultNth
- callOpts = &newOpts
- }
if *flagOutput {
ctx.logProgram(pid, entry.P, callOpts)
}
@@ -190,14 +181,9 @@ func (ctx *Context) execute(pid int, env *ipc.Env, entry *prog.LogEntry) {
}
func (ctx *Context) logProgram(pid int, p *prog.Prog, callOpts *ipc.ExecOpts) {
- strOpts := ""
- if callOpts.Flags&ipc.FlagInjectFault != 0 {
- strOpts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)",
- callOpts.FaultCall, callOpts.FaultNth)
- }
data := p.Serialize()
ctx.logMu.Lock()
- log.Logf(0, "executing program %v%v:\n%s", pid, strOpts, data)
+ log.Logf(0, "executing program %v:\n%s", pid, data)
ctx.logMu.Unlock()
}
@@ -319,11 +305,6 @@ func createConfig(target *prog.Target, features *host.Features, featuresFlags cs
if features[host.FeatureExtraCoverage].Enabled {
config.Flags |= ipc.FlagExtraCover
}
- if *flagFaultCall >= 0 {
- execOpts.Flags |= ipc.FlagInjectFault
- execOpts.FaultCall = *flagFaultCall
- execOpts.FaultNth = *flagFaultNth
- }
if featuresFlags["tun"].Enabled && features[host.FeatureNetInjection].Enabled {
config.Flags |= ipc.FlagEnableTun
}
diff --git a/tools/syz-prog2c/prog2c.go b/tools/syz-prog2c/prog2c.go
index ea729883f..aa9c146e1 100644
--- a/tools/syz-prog2c/prog2c.go
+++ b/tools/syz-prog2c/prog2c.go
@@ -27,8 +27,6 @@ var (
flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
flagSandbox = flag.String("sandbox", "", "sandbox to use (none, setuid, namespace)")
flagProg = flag.String("prog", "", "file with program to convert (required)")
- flagFaultCall = flag.Int("fault_call", -1, "inject fault into this call (0-based)")
- flagFaultNth = flag.Int("fault_nth", 0, "inject fault on n-th operation (0-based)")
flagHandleSegv = flag.Bool("segv", false, "catch and ignore SIGSEGV")
flagUseTmpDir = flag.Bool("tmpdir", false, "create a temporary dir and execute inside it")
flagTrace = flag.Bool("trace", false, "trace syscall results")
@@ -80,9 +78,6 @@ func main() {
Procs: *flagProcs,
Slowdown: *flagSlowdown,
Sandbox: *flagSandbox,
- Fault: *flagFaultCall >= 0,
- FaultCall: *flagFaultCall,
- FaultNth: *flagFaultNth,
Leak: *flagLeak,
NetInjection: features["tun"].Enabled,
NetDevices: features["net_dev"].Enabled,