diff options
| author | Andrey Konovalov <andreyknvl@google.com> | 2018-11-22 19:04:06 +0100 |
|---|---|---|
| committer | Andrey Konovalov <andreyknvl@gmail.com> | 2019-01-16 19:19:53 +0100 |
| commit | b5df78dc5d994bc61f1ecee2c5c85313178f392e (patch) | |
| tree | c285f3be9e8d0ef32e607186ec9ce9eae6901cce | |
| parent | c0d4a12ee72a2279eada43d9476d2f8a074c3818 (diff) | |
all: support extra coverage
Right now syzkaller only supports coverage collected from the threads that
execute syscalls. However some useful things happen in background threads,
and it would be nice to collect coverage from those threads as well.
This change adds extra coverage support to syzkaller. This coverage is not
associated with a particular syscall, but rather with the whole program.
Executor passes extra coverage over the same IPC mechanism to syz-fuzzer
with the syscall number set to -1. syz-fuzzer then passes this coverage to
syz-manager with the call name "extra".
This change requires the following kcov patch:
https://github.com/xairy/linux/pull/2
| -rw-r--r-- | executor/common_linux.h | 7 | ||||
| -rw-r--r-- | executor/executor.cc | 71 | ||||
| -rw-r--r-- | executor/executor_bsd.h | 4 | ||||
| -rw-r--r-- | executor/executor_linux.h | 51 | ||||
| -rw-r--r-- | executor/nocover.h | 4 | ||||
| -rw-r--r-- | pkg/csource/generated.go | 7 | ||||
| -rw-r--r-- | pkg/ipc/ipc.go | 57 | ||||
| -rw-r--r-- | pkg/runtest/run.go | 4 | ||||
| -rw-r--r-- | syz-fuzzer/fuzzer.go | 38 | ||||
| -rw-r--r-- | syz-fuzzer/proc.go | 98 | ||||
| -rw-r--r-- | tools/syz-execprog/execprog.go | 30 |
11 files changed, 265 insertions, 106 deletions
diff --git a/executor/common_linux.h b/executor/common_linux.h index 608dc964c..78d1e82a1 100644 --- a/executor/common_linux.h +++ b/executor/common_linux.h @@ -1791,7 +1791,12 @@ static void sandbox_common() #endif struct rlimit rlim; - rlim.rlim_cur = rlim.rlim_max = 200 << 20; +#if SYZ_EXECUTOR + rlim.rlim_cur = rlim.rlim_max = (200 << 20) + + (kMaxThreads * kCoverSize + kExtraCoverSize) * sizeof(void*); +#else + rlim.rlim_cur = rlim.rlim_max = (200 << 20); +#endif setrlimit(RLIMIT_AS, &rlim); rlim.rlim_cur = rlim.rlim_max = 32 << 20; setrlimit(RLIMIT_MEMLOCK, &rlim); diff --git a/executor/executor.cc b/executor/executor.cc index 72f19105c..bb0e8defd 100644 --- a/executor/executor.cc +++ b/executor/executor.cc @@ -58,6 +58,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout const int kCoverFd = kOutPipeFd - kMaxThreads; const int kMaxArgs = 9; const int kCoverSize = 256 << 10; +const int kExtraCoverSize = 256 << 10; const int kFailStatus = 67; const int kRetryStatus = 69; const int kErrorStatus = 68; @@ -208,6 +209,8 @@ struct thread_t { static thread_t threads[kMaxThreads]; static thread_t* last_scheduled; +static cover_t extra_cov; + struct res_t { bool executed; uint64 val; @@ -288,6 +291,7 @@ static thread_t* schedule_call(int call_index, int call_num, bool colliding, uin static void handle_completion(thread_t* th); static void copyout_call_results(thread_t* th); static void write_call_output(thread_t* th, bool finished); +static void write_extra_output(); static void execute_call(thread_t* th); static void thread_create(thread_t* th, int id); static void* worker_thread(void* arg); @@ -366,8 +370,11 @@ int main(int argc, char** argv) if (flag_cover) { for (int i = 0; i < kMaxThreads; i++) { threads[i].cov.fd = kCoverFd + i; - cover_open(&threads[i].cov); + cover_open(&threads[i].cov, false); } + cover_open(&extra_cov, true); + // Don't enable comps because we don't use them in the fuzzer yet. 
+ cover_enable(&extra_cov, false, true); } int status = 0; @@ -557,8 +564,11 @@ void execute_one() retry: uint64* input_pos = (uint64*)input_data; - if (flag_cover && !colliding && !flag_threaded) - cover_enable(&threads[0].cov, flag_collect_comps); + if (flag_cover && !colliding) { + if (!flag_threaded) + cover_enable(&threads[0].cov, flag_collect_comps, false); + cover_reset(&extra_cov); + } int call_index = 0; for (;;) { @@ -719,6 +729,7 @@ retry: write_call_output(th, false); } } + write_extra_output(); } } @@ -766,21 +777,21 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop } #if SYZ_EXECUTOR_USES_SHMEM -template <typename cover_t> -void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover_count_pos) +template <typename cover_data_t> +void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover_count_pos) { // Write out feedback signals. // Currently it is code edges computed as xor of two subsequent basic block PCs. - cover_t* cover_data = ((cover_t*)th->cov.data) + 1; + cover_data_t* cover_data = ((cover_data_t*)cov->data) + 1; uint32 nsig = 0; - cover_t prev = 0; - for (uint32 i = 0; i < th->cov.size; i++) { - cover_t pc = cover_data[i]; + cover_data_t prev = 0; + for (uint32 i = 0; i < cov->size; i++) { + cover_data_t pc = cover_data[i]; if (!cover_check(pc)) { debug("got bad pc: 0x%llx\n", (uint64)pc); doexit(0); } - cover_t sig = pc ^ prev; + cover_data_t sig = pc ^ prev; prev = hash(pc); if (dedup(sig)) continue; @@ -793,9 +804,9 @@ void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover if (!flag_collect_cover) return; // Write out real coverage (basic block PCs). 
- uint32 cover_size = th->cov.size; + uint32 cover_size = cov->size; if (flag_dedup_cover) { - cover_t* end = cover_data + cover_size; + cover_data_t* end = cover_data + cover_size; std::sort(cover_data, end); cover_size = std::unique(cover_data, end) - cover_data; } @@ -814,8 +825,10 @@ void handle_completion(thread_t* th) event_isset(&th->ready), event_isset(&th->done), th->executing); if (th->res != (long)-1) copyout_call_results(th); - if (!collide && !th->colliding) + if (!collide && !th->colliding) { write_call_output(th, true); + write_extra_output(); + } th->executing = false; running--; if (running < 0) @@ -894,9 +907,9 @@ void write_call_output(thread_t* th, bool finished) *comps_count_pos = comps_size; } else if (flag_cover) { if (is_kernel_64_bit) - write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos); + write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos); else - write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos); + write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos); } debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n", completed, th->call_index, th->call_num, reserrno, finished, blocked, @@ -922,6 +935,32 @@ void write_call_output(thread_t* th, bool finished) #endif } +void write_extra_output() +{ +#if SYZ_EXECUTOR_USES_SHMEM + if (!flag_cover || flag_collect_comps) + return; + cover_collect(&extra_cov); + if (!extra_cov.size) + return; + write_output(-1); // call index + write_output(-1); // call num + write_output(999); // errno + write_output(0); // call flags + uint32* signal_count_pos = write_output(0); // filled in later + uint32* cover_count_pos = write_output(0); // filled in later + write_output(0); // comps_count_pos + if (is_kernel_64_bit) + write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos); + else + write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos); + 
cover_reset(&extra_cov); + debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos); + completed++; + write_completed(completed); +#endif +} + void thread_create(thread_t* th, int id) { th->created = true; @@ -939,7 +978,7 @@ void* worker_thread(void* arg) thread_t* th = (thread_t*)arg; if (flag_cover) - cover_enable(&th->cov, flag_collect_comps); + cover_enable(&th->cov, flag_collect_comps, false); for (;;) { event_wait(&th->ready); event_reset(&th->ready); diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h index e0fce2c1e..3af57fa5b 100644 --- a/executor/executor_bsd.h +++ b/executor/executor_bsd.h @@ -54,7 +54,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs]) #if GOOS_freebsd || GOOS_openbsd -static void cover_open(cover_t* cov) +static void cover_open(cover_t* cov, bool extra) { int fd = open("/dev/kcov", O_RDWR); if (fd == -1) @@ -85,7 +85,7 @@ static void cover_open(cover_t* cov) cov->data_end = cov->data + mmap_alloc_size; } -static void cover_enable(cover_t* cov, bool collect_comps) +static void cover_enable(cover_t* cov, bool collect_comps, bool extra) { int kcov_mode = collect_comps ? 
KCOV_MODE_TRACE_CMP : KCOV_MODE_TRACE_PC; #if GOOS_freebsd diff --git a/executor/executor_linux.h b/executor/executor_linux.h index c7af48144..68b143ba0 100644 --- a/executor/executor_linux.h +++ b/executor/executor_linux.h @@ -11,13 +11,30 @@ #include <sys/syscall.h> #include <unistd.h> +const unsigned long KCOV_TRACE_PC = 0; +const unsigned long KCOV_TRACE_CMP = 1; + +template <int N> +struct kcov_remote_arg { + unsigned trace_mode; + unsigned area_size; + unsigned num_handles; + __u64 common_handle; + __u64 handles[N]; +}; + #define KCOV_INIT_TRACE32 _IOR('c', 1, uint32) #define KCOV_INIT_TRACE64 _IOR('c', 1, uint64) #define KCOV_ENABLE _IO('c', 100) #define KCOV_DISABLE _IO('c', 101) +#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg<0>) -const unsigned long KCOV_TRACE_PC = 0; -const unsigned long KCOV_TRACE_CMP = 1; +#define KCOV_REMOTE_HANDLE_USB 0x4242000000000000ull + +static inline __u64 kcov_remote_handle_usb(int bus) +{ + return KCOV_REMOTE_HANDLE_USB + (__u64)bus; +} static bool detect_kernel_bitness(); @@ -38,7 +55,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs]) return syscall(c->sys_nr, a[0], a[1], a[2], a[3], a[4], a[5]); } -static void cover_open(cover_t* cov) +static void cover_open(cover_t* cov, bool extra) { int fd = open("/sys/kernel/debug/kcov", O_RDWR); if (fd == -1) @@ -47,9 +64,10 @@ static void cover_open(cover_t* cov) fail("filed to dup2(%d, %d) cover fd", fd, cov->fd); close(fd); const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32; - if (ioctl(cov->fd, kcov_init_trace, kCoverSize)) + const int cover_size = extra ? kExtraCoverSize : kCoverSize; + if (ioctl(cov->fd, kcov_init_trace, cover_size)) fail("cover init trace write failed"); - size_t mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4); + size_t mmap_alloc_size = cover_size * (is_kernel_64_bit ? 
8 : 4); cov->data = (char*)mmap(NULL, mmap_alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0); if (cov->data == MAP_FAILED) @@ -57,15 +75,28 @@ static void cover_open(cover_t* cov) cov->data_end = cov->data + mmap_alloc_size; } -static void cover_enable(cover_t* cov, bool collect_comps) +static void cover_enable(cover_t* cov, bool collect_comps, bool extra) { int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC; - // This should be fatal, + // The KCOV_ENABLE call should be fatal, // but in practice ioctl fails with assorted errors (9, 14, 25), // so we use exitf. - if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode)) - exitf("cover enable write trace failed, mode=%d", kcov_mode); - current_cover = cov; + if (!extra) { + if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode)) + exitf("cover enable write trace failed, mode=%d", kcov_mode); + current_cover = cov; + return; + } + struct kcov_remote_arg<1> arg; + memset(&arg, 0, sizeof(arg)); + arg.trace_mode = kcov_mode; + // Coverage buffer size of remote threads. + arg.area_size = kExtraCoverSize * (is_kernel_64_bit ? 8 : 4); + arg.num_handles = 1; + arg.handles[0] = kcov_remote_handle_usb(procid); + arg.common_handle = procid + 1; + if (ioctl(cov->fd, KCOV_REMOTE_ENABLE, &arg)) + exitf("cover enable write trace failed"); } static void cover_reset(cover_t* cov) diff --git a/executor/nocover.h b/executor/nocover.h index 94f3707f0..3b23f66ab 100644 --- a/executor/nocover.h +++ b/executor/nocover.h @@ -1,11 +1,11 @@ // Copyright 2018 syzkaller project authors. All rights reserved. // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
-static void cover_open(cover_t* cov) +static void cover_open(cover_t* cov, bool extra) { } -static void cover_enable(cover_t* cov, bool collect_comps) +static void cover_enable(cover_t* cov, bool collect_comps, bool extra) { } diff --git a/pkg/csource/generated.go b/pkg/csource/generated.go index cf1c9784a..1d7e6253f 100644 --- a/pkg/csource/generated.go +++ b/pkg/csource/generated.go @@ -3589,7 +3589,12 @@ static void sandbox_common() #endif struct rlimit rlim; - rlim.rlim_cur = rlim.rlim_max = 200 << 20; +#if SYZ_EXECUTOR + rlim.rlim_cur = rlim.rlim_max = (200 << 20) + + (kMaxThreads * kCoverSize + kExtraCoverSize) * sizeof(void*); +#else + rlim.rlim_cur = rlim.rlim_max = (200 << 20); +#endif setrlimit(RLIMIT_AS, &rlim); rlim.rlim_cur = rlim.rlim_max = 32 << 20; setrlimit(RLIMIT_MEMLOCK, &rlim); diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go index 6c4538faf..b3799ebfb 100644 --- a/pkg/ipc/ipc.go +++ b/pkg/ipc/ipc.go @@ -15,7 +15,9 @@ import ( "time" "unsafe" + "github.com/google/syzkaller/pkg/cover" "github.com/google/syzkaller/pkg/osutil" + "github.com/google/syzkaller/pkg/signal" "github.com/google/syzkaller/prog" ) @@ -95,7 +97,7 @@ type CallInfo struct { type ProgInfo struct { Calls []CallInfo - // TODO: remote coverage would go here. 
+ Extra CallInfo // stores Signal and Cover collected from background threads } type Env struct { @@ -125,6 +127,8 @@ const ( compSizeMask = 6 compSize8 = 6 compConstMask = 1 + + extraReplyIndex = 0xffffffff // uint32(-1) ) func SandboxToFlags(sandbox string) (EnvFlags, error) { @@ -325,24 +329,31 @@ func (env *Env) parseOutput(p *prog.Prog) (*ProgInfo, error) { return nil, fmt.Errorf("failed to read number of calls") } info := &ProgInfo{Calls: make([]CallInfo, len(p.Calls))} + extraParts := make([]CallInfo, 0) for i := uint32(0); i < ncmd; i++ { if len(out) < int(unsafe.Sizeof(callReply{})) { return nil, fmt.Errorf("failed to read call %v reply", i) } reply := *(*callReply)(unsafe.Pointer(&out[0])) out = out[unsafe.Sizeof(callReply{}):] - if int(reply.index) >= len(info.Calls) { - return nil, fmt.Errorf("bad call %v index %v/%v", i, reply.index, len(info.Calls)) - } - if num := p.Calls[reply.index].Meta.ID; int(reply.num) != num { - return nil, fmt.Errorf("wrong call %v num %v/%v", i, reply.num, num) - } - inf := &info.Calls[reply.index] - if inf.Flags != 0 || inf.Signal != nil { - return nil, fmt.Errorf("duplicate reply for call %v/%v/%v", i, reply.index, reply.num) + var inf *CallInfo + if reply.index != extraReplyIndex { + if int(reply.index) >= len(info.Calls) { + return nil, fmt.Errorf("bad call %v index %v/%v", i, reply.index, len(info.Calls)) + } + if num := p.Calls[reply.index].Meta.ID; int(reply.num) != num { + return nil, fmt.Errorf("wrong call %v num %v/%v", i, reply.num, num) + } + inf = &info.Calls[reply.index] + if inf.Flags != 0 || inf.Signal != nil { + return nil, fmt.Errorf("duplicate reply for call %v/%v/%v", i, reply.index, reply.num) + } + inf.Errno = int(reply.errno) + inf.Flags = CallFlags(reply.flags) + } else { + extraParts = append(extraParts, CallInfo{}) + inf = &extraParts[len(extraParts)-1] } - inf.Errno = int(reply.errno) - inf.Flags = CallFlags(reply.flags) if inf.Signal, ok = readUint32Array(&out, reply.signalSize); !ok { return nil, 
fmt.Errorf("call %v/%v/%v: signal overflow: %v/%v", i, reply.index, reply.num, reply.signalSize, len(out)) @@ -357,9 +368,31 @@ func (env *Env) parseOutput(p *prog.Prog) (*ProgInfo, error) { } inf.Comps = comps } + if len(extraParts) == 0 { + return info, nil + } + info.Extra = convertExtra(extraParts) return info, nil } +func convertExtra(extraParts []CallInfo) CallInfo { + var extra CallInfo + extraCover := make(cover.Cover) + extraSignal := make(signal.Signal) + for _, part := range extraParts { + extraCover.Merge(part.Cover) + extraSignal.Merge(signal.FromRaw(part.Signal, 0)) + } + extra.Cover = extraCover.Serialize() + extra.Signal = make([]uint32, len(extraSignal)) + i := 0 + for s := range extraSignal { + extra.Signal[i] = uint32(s) + i++ + } + return extra +} + func readComps(outp *[]byte, compsSize uint32) (prog.CompMap, error) { if compsSize == 0 { return nil, nil diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go index efcbe3066..cac5f7334 100644 --- a/pkg/runtest/run.go +++ b/pkg/runtest/run.go @@ -519,11 +519,13 @@ func RunTest(req *RunRequest, executor string) { req.Err = fmt.Errorf("run %v: hanged", run) return } + // Detach Signal and Cover because they point into the output shmem region. for i := range info.Calls { - // Detach them because they point into the output shmem region. info.Calls[i].Signal = append([]uint32{}, info.Calls[i].Signal...) info.Calls[i].Cover = append([]uint32{}, info.Calls[i].Cover...) } + info.Extra.Signal = append([]uint32{}, info.Extra.Signal...) + info.Extra.Cover = append([]uint32{}, info.Extra.Cover...) 
req.Info = append(req.Info, info) } } diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go index 8aafc33c7..b6f7e21ab 100644 --- a/syz-fuzzer/fuzzer.go +++ b/syz-fuzzer/fuzzer.go @@ -384,30 +384,40 @@ func (fuzzer *Fuzzer) corpusSignalDiff(sign signal.Signal) signal.Signal { return fuzzer.corpusSignal.Diff(sign) } -func (fuzzer *Fuzzer) checkNewSignal(p *prog.Prog, info *ipc.ProgInfo) (calls []int) { +func (fuzzer *Fuzzer) checkNewSignal(p *prog.Prog, info *ipc.ProgInfo) (calls []int, extra bool) { fuzzer.signalMu.RLock() defer fuzzer.signalMu.RUnlock() for i, inf := range info.Calls { - diff := fuzzer.maxSignal.DiffRaw(inf.Signal, signalPrio(p.Target, p.Calls[i], &inf)) - if diff.Empty() { - continue + if fuzzer.checkNewCallSignal(p, &inf, i) { + calls = append(calls, i) } - calls = append(calls, i) - fuzzer.signalMu.RUnlock() - fuzzer.signalMu.Lock() - fuzzer.maxSignal.Merge(diff) - fuzzer.newSignal.Merge(diff) - fuzzer.signalMu.Unlock() - fuzzer.signalMu.RLock() } + extra = fuzzer.checkNewCallSignal(p, &info.Extra, -1) return } -func signalPrio(target *prog.Target, c *prog.Call, ci *ipc.CallInfo) (prio uint8) { - if ci.Errno == 0 { +func (fuzzer *Fuzzer) checkNewCallSignal(p *prog.Prog, info *ipc.CallInfo, call int) bool { + diff := fuzzer.maxSignal.DiffRaw(info.Signal, signalPrio(p, info, call)) + if diff.Empty() { + return false + } + fuzzer.signalMu.RUnlock() + fuzzer.signalMu.Lock() + fuzzer.maxSignal.Merge(diff) + fuzzer.newSignal.Merge(diff) + fuzzer.signalMu.Unlock() + fuzzer.signalMu.RLock() + return true +} + +func signalPrio(p *prog.Prog, info *ipc.CallInfo, call int) (prio uint8) { + if call == -1 { + return 0 + } + if info.Errno == 0 { prio |= 1 << 1 } - if !target.CallContainsAny(c) { + if !p.Target.CallContainsAny(p.Calls[call]) { prio |= 1 << 0 } return diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go index f6b7325f5..18571b902 100644 --- a/syz-fuzzer/proc.go +++ b/syz-fuzzer/proc.go @@ -106,13 +106,19 @@ func (proc *Proc) loop() { func 
(proc *Proc) triageInput(item *WorkTriage) { log.Logf(1, "#%v: triaging type=%x", proc.pid, item.flags) - call := item.p.Calls[item.call] - inputSignal := signal.FromRaw(item.info.Signal, signalPrio(item.p.Target, call, &item.info)) + prio := signalPrio(item.p, &item.info, item.call) + inputSignal := signal.FromRaw(item.info.Signal, prio) newSignal := proc.fuzzer.corpusSignalDiff(inputSignal) if newSignal.Empty() { return } - log.Logf(3, "triaging input for %v (new signal=%v)", call.Meta.CallName, newSignal.Len()) + callName := ".extra" + logCallName := "extra" + if item.call != -1 { + callName = item.p.Calls[item.call].Meta.CallName + logCallName = fmt.Sprintf("call #%v %v", item.call, callName) + } + log.Logf(3, "triaging input for %v (new signal=%v)", logCallName, newSignal.Len()) var inputCover cover.Cover const ( signalRuns = 3 @@ -122,8 +128,7 @@ func (proc *Proc) triageInput(item *WorkTriage) { notexecuted := 0 for i := 0; i < signalRuns; i++ { info := proc.executeRaw(proc.execOptsCover, item.p, StatTriage) - if info == nil || len(info.Calls) == 0 || len(info.Calls[item.call].Signal) == 0 || - item.info.Errno == 0 && info.Calls[item.call].Errno != 0 { + if !reexecutionSuccess(info, &item.info, item.call) { // The call was not executed or failed. notexecuted++ if notexecuted > signalRuns/2+1 { @@ -131,32 +136,25 @@ func (proc *Proc) triageInput(item *WorkTriage) { } continue } - inf := info.Calls[item.call] - thisSignal := signal.FromRaw(inf.Signal, signalPrio(item.p.Target, call, &inf)) + thisSignal, thisCover := getSignalAndCover(item.p, info, item.call) newSignal = newSignal.Intersection(thisSignal) // Without !minimized check manager starts losing some considerable amount // of coverage after each restart. Mechanics of this are not completely clear. 
if newSignal.Empty() && item.flags&ProgMinimized == 0 { return } - inputCover.Merge(inf.Cover) + inputCover.Merge(thisCover) } if item.flags&ProgMinimized == 0 { item.p, item.call = prog.Minimize(item.p, item.call, false, func(p1 *prog.Prog, call1 int) bool { for i := 0; i < minimizeAttempts; i++ { info := proc.execute(proc.execOptsNoCollide, p1, ProgNormal, StatMinimize) - if info == nil || len(info.Calls) == 0 || len(info.Calls[call1].Signal) == 0 { - continue // The call was not executed. - } - inf := info.Calls[call1] - if item.info.Errno == 0 && inf.Errno != 0 { - // Don't minimize calls from successful to unsuccessful. - // Successful calls are much more valuable. - return false + if !reexecutionSuccess(info, &item.info, call1) { + // The call was not executed or failed. + continue } - prio := signalPrio(p1.Target, p1.Calls[call1], &inf) - thisSignal := signal.FromRaw(inf.Signal, prio) + thisSignal, _ := getSignalAndCover(p1, info, call1) if newSignal.Intersection(thisSignal).Len() == newSignal.Len() { return true } @@ -168,9 +166,9 @@ func (proc *Proc) triageInput(item *WorkTriage) { data := item.p.Serialize() sig := hash.Hash(data) - log.Logf(2, "added new input for %v to corpus:\n%s", call.Meta.CallName, data) + log.Logf(2, "added new input for %v to corpus:\n%s", logCallName, data) proc.fuzzer.sendInputToManager(rpctype.RPCInput{ - Call: call.Meta.CallName, + Call: callName, Prog: data, Signal: inputSignal.Serialize(), Cover: inputCover.Serialize(), @@ -183,11 +181,34 @@ func (proc *Proc) triageInput(item *WorkTriage) { } } +func reexecutionSuccess(info *ipc.ProgInfo, oldInfo *ipc.CallInfo, call int) bool { + if info == nil || len(info.Calls) == 0 { + return false + } + if call != -1 { + // Don't minimize calls from successful to unsuccessful. + // Successful calls are much more valuable. 
+ if oldInfo.Errno == 0 && info.Calls[call].Errno != 0 { + return false + } + return len(info.Calls[call].Signal) != 0 + } + return len(info.Extra.Signal) != 0 +} + +func getSignalAndCover(p *prog.Prog, info *ipc.ProgInfo, call int) (signal.Signal, []uint32) { + inf := &info.Extra + if call != -1 { + inf = &info.Calls[call] + } + return signal.FromRaw(inf.Signal, signalPrio(p, inf, call)), inf.Cover +} + func (proc *Proc) smashInput(item *WorkSmash) { - if proc.fuzzer.faultInjectionEnabled { + if proc.fuzzer.faultInjectionEnabled && item.call != -1 { proc.failCall(item.p, item.call) } - if proc.fuzzer.comparisonTracingEnabled { + if proc.fuzzer.comparisonTracingEnabled && item.call != -1 { proc.executeHintSeed(item.p, item.call) } corpus := proc.fuzzer.corpusSnapshot() @@ -232,23 +253,30 @@ func (proc *Proc) executeHintSeed(p *prog.Prog, call int) { func (proc *Proc) execute(execOpts *ipc.ExecOpts, p *prog.Prog, flags ProgTypes, stat Stat) *ipc.ProgInfo { info := proc.executeRaw(execOpts, p, stat) - for _, callIndex := range proc.fuzzer.checkNewSignal(p, info) { - info := info.Calls[callIndex] - // info.Signal points to the output shmem region, detach it before queueing. - info.Signal = append([]uint32{}, info.Signal...) - // None of the caller use Cover, so just nil it instead of detaching. - // Note: triage input uses executeRaw to get coverage. - info.Cover = nil - proc.fuzzer.workQueue.enqueue(&WorkTriage{ - p: p.Clone(), - call: callIndex, - info: info, - flags: flags, - }) + calls, extra := proc.fuzzer.checkNewSignal(p, info) + for _, callIndex := range calls { + proc.enqueueCallTriage(p, flags, callIndex, info.Calls[callIndex]) + } + if extra { + proc.enqueueCallTriage(p, flags, -1, info.Extra) } return info } +func (proc *Proc) enqueueCallTriage(p *prog.Prog, flags ProgTypes, callIndex int, info ipc.CallInfo) { + // info.Signal points to the output shmem region, detach it before queueing. + info.Signal = append([]uint32{}, info.Signal...) 
+ // None of the caller use Cover, so just nil it instead of detaching. + // Note: triage input uses executeRaw to get coverage. + info.Cover = nil + proc.fuzzer.workQueue.enqueue(&WorkTriage{ + p: p.Clone(), + call: callIndex, + info: info, + flags: flags, + }) +} + func (proc *Proc) executeRaw(opts *ipc.ExecOpts, p *prog.Prog, stat Stat) *ipc.ProgInfo { if opts.Flags&ipc.FlagDedupCover == 0 { log.Fatalf("dedup cover is not enabled") diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go index 5965677be..1a4468063 100644 --- a/tools/syz-execprog/execprog.go +++ b/tools/syz-execprog/execprog.go @@ -148,7 +148,7 @@ func (ctx *Context) execute(pid int, env *ipc.Env, entry *prog.LogEntry) { log.Logf(0, "result: failed=%v hanged=%v err=%v\n\n%s", failed, hanged, err, output) } - if info != nil && len(info.Calls) != 0 { + if info != nil { ctx.printCallResults(info) if *flagHints { ctx.printHints(entry.P, info) @@ -220,21 +220,27 @@ func (ctx *Context) printHints(p *prog.Prog, info *ipc.ProgInfo) { log.Logf(0, "ncomps=%v ncandidates=%v", ncomps, ncandidates) } +func (ctx *Context) dumpCallCoverage(coverFile string, info *ipc.CallInfo) { + if len(info.Cover) == 0 { + return + } + buf := new(bytes.Buffer) + for _, pc := range info.Cover { + fmt.Fprintf(buf, "0x%x\n", cover.RestorePC(pc, 0xffffffff)) + } + err := osutil.WriteFile(coverFile, buf.Bytes()) + if err != nil { + log.Fatalf("failed to write coverage file: %v", err) + } +} + func (ctx *Context) dumpCoverage(coverFile string, info *ipc.ProgInfo) { for i, inf := range info.Calls { log.Logf(0, "call #%v: signal %v, coverage %v", i, len(inf.Signal), len(inf.Cover)) - if len(inf.Cover) == 0 { - continue - } - buf := new(bytes.Buffer) - for _, pc := range inf.Cover { - fmt.Fprintf(buf, "0x%x\n", cover.RestorePC(pc, 0xffffffff)) - } - err := osutil.WriteFile(fmt.Sprintf("%v.%v", coverFile, i), buf.Bytes()) - if err != nil { - log.Fatalf("failed to write coverage file: %v", err) - } + 
ctx.dumpCallCoverage(fmt.Sprintf("%v.%v", coverFile, i), &inf) } + log.Logf(0, "extra: signal %v, coverage %v", len(info.Extra.Signal), len(info.Extra.Cover)) + ctx.dumpCallCoverage(fmt.Sprintf("%v.extra", coverFile), &info.Extra) } func (ctx *Context) getProgramIndex() int { |
