diff options
| author | Victor Chibotaru <tchibo@google.com> | 2017-08-16 21:18:04 +0200 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2017-08-30 18:40:14 +0200 |
| commit | 07c84b670b4a25a7795e9fb8d47abe0922d2976b (patch) | |
| tree | 97487c90593aeb8c8773762790f133ac0bc6a3b7 /executor/executor.cc | |
| parent | 1336586b42f6118b19c3da932fd615e85a47c0b5 (diff) | |
executor, ipc: modify the IO between KCOV<->executor<->fuzzer
Now the executor is able to read comparison data from KCOV and write it
to the fuzzer.
Diffstat (limited to 'executor/executor.cc')
| -rw-r--r-- | executor/executor.cc | 153 |
1 file changed, 116 insertions, 37 deletions
diff --git a/executor/executor.cc b/executor/executor.cc index fdd44e246..8f5e221ac 100644 --- a/executor/executor.cc +++ b/executor/executor.cc @@ -106,6 +106,75 @@ struct res_t { res_t results[kMaxCommands]; +enum { + KCOV_CMP_CONST = 1, + KCOV_CMP_SIZE1 = 0, + KCOV_CMP_SIZE2 = 2, + KCOV_CMP_SIZE4 = 4, + KCOV_CMP_SIZE8 = 6, + KCOV_CMP_SIZE_MASK = 6, +}; + +struct kcov_comparison_t { + uint64_t type; + uint64_t arg1; + uint64_t arg2; + + bool operator==(const struct kcov_comparison_t& other) const + { + return type == other.type && arg1 == other.arg1 && arg2 == other.arg2; + } + + bool operator<(const struct kcov_comparison_t& other) const + { + if (type != other.type) + return type < other.type; + if (arg1 != other.arg1) + return arg1 < other.arg1; + return arg2 < other.arg2; + } + + // Writes the structure using the write_one function for each field. + // Inspired by write_output() function. + void write(uint32_t* (*write_one)(uint32_t)) + { + // Write order: type arg1 arg2. + write_one((uint32_t)type); + + // KCOV converts all arguments of size x first to uintx_t and then to + // uint64_t. We want to properly extend signed values, e.g we want + // int8_t c = 0xfe to be represented as 0xfffffffffffffffe. + // Note that uint8_t c = 0xfe will be represented the same way. + // This is ok because during hints processing we will anyways try + // the value 0x00000000000000fe. 
+ switch (type & KCOV_CMP_SIZE_MASK) { + case KCOV_CMP_SIZE1: + arg1 = (uint64_t)(int64_t)(int8_t)arg1; + arg2 = (uint64_t)(int64_t)(int8_t)arg2; + break; + case KCOV_CMP_SIZE2: + arg1 = (uint64_t)(int64_t)(int16_t)arg1; + arg2 = (uint64_t)(int64_t)(int16_t)arg2; + break; + case KCOV_CMP_SIZE4: + arg1 = (uint64_t)(int64_t)(int32_t)arg1; + arg2 = (uint64_t)(int64_t)(int32_t)arg2; + break; + } + bool is_size_8 = (type & KCOV_CMP_SIZE_MASK) == KCOV_CMP_SIZE8; + if (!is_size_8) { + write_one((uint32_t)arg1); + write_one((uint32_t)arg2); + return; + } + // If we have 64 bits arguments then write them in Little-endian. + write_one((uint32_t)(arg1 & 0xFFFFFFFF)); + write_one((uint32_t)(arg1 >> 32)); + write_one((uint32_t)(arg2 & 0xFFFFFFFF)); + write_one((uint32_t)(arg2 >> 32)); + } +}; + struct thread_t { bool created; int id; @@ -599,46 +668,56 @@ void handle_completion(thread_t* th) write_output(th->fault_injected); uint32_t* signal_count_pos = write_output(0); // filled in later uint32_t* cover_count_pos = write_output(0); // filled in later - - // Write out feedback signals. - // Currently it is code edges computed as xor of two subsequent basic block PCs. - uint64_t* cover_data = th->cover_data + 1; - uint32_t cover_size = th->cover_size; - uint32_t prev = 0; - uint32_t nsig = 0; - for (uint32_t i = 0; i < cover_size; i++) { - uint32_t pc = cover_data[i]; - uint32_t sig = pc ^ prev; - prev = hash(pc); - if (dedup(sig)) - continue; - write_output(sig); - nsig++; - } - *signal_count_pos = nsig; - if (flag_collect_cover) { - // Write out real coverage (basic block PCs). 
- if (flag_dedup_cover) { - std::sort(cover_data, cover_data + cover_size); - uint64_t w = 0; - uint64_t last = 0; - for (uint32_t i = 0; i < cover_size; i++) { - uint64_t pc = cover_data[i]; - if (pc == last) - continue; - cover_data[w++] = last = pc; + uint32_t* comps_count_pos = write_output(0); // filled in later + uint32_t nsig = 0, cover_size = 0, comps_size = 0; + + if (flag_collect_comps) { + // Collect only the comparisons + comps_size = th->cover_size; + kcov_comparison_t* start = (kcov_comparison_t*)th->cover_data; + kcov_comparison_t* end = start + comps_size; + std::sort(start, end); + comps_size = std::unique(start, end) - start; + for (uint32_t i = 0; i < comps_size; ++i) + start[i].write(write_output); + } else { + // Write out feedback signals. + // Currently it is code edges computed as xor of + // two subsequent basic block PCs. + uint32_t prev = 0; + for (uint32_t i = 0; i < th->cover_size; i++) { + uint32_t pc = (uint32_t)th->cover_data[i]; + uint32_t sig = pc ^ prev; + prev = hash(pc); + if (dedup(sig)) + continue; + write_output(sig); + nsig++; + } + if (flag_collect_cover) { + // Write out real coverage (basic block PCs). + cover_size = th->cover_size; + if (flag_dedup_cover) { + uint64_t* start = (uint64_t*)th->cover_data; + uint64_t* end = start + cover_size; + std::sort(start, end); + cover_size = std::unique(start, end) - start; } - cover_size = w; + // Truncate PCs to uint32_t assuming that they fit into 32-bits. + // True for x86_64 and arm64 without KASLR. + for (uint32_t i = 0; i < cover_size; i++) + write_output((uint32_t)th->cover_data[i]); } - // Truncate PCs to uint32_t assuming that they fit into 32-bits. - // True for x86_64 and arm64 without KASLR. 
- for (uint32_t i = 0; i < cover_size; i++) - write_output((uint32_t)cover_data[i]); - *cover_count_pos = cover_size; } - debug("out #%u: index=%u num=%u errno=%d sig=%u cover=%u\n", - completed, th->call_index, th->call_num, reserrno, nsig, cover_size); - + // Write out real coverage (basic block PCs). + *cover_count_pos = cover_size; + // Write out number of comparisons + *comps_count_pos = comps_size; + // Write out number of signals + *signal_count_pos = nsig; + debug("out #%u: index=%u num=%u errno=%d sig=%u cover=%u comps=%u\n", + completed, th->call_index, th->call_num, reserrno, nsig, + cover_size, comps_size); completed++; __atomic_store_n(output_data, completed, __ATOMIC_RELEASE); } |
