about summary refs log tree commit diff stats
path: root/executor
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2018-04-16 21:52:40 +0200
committerDmitry Vyukov <dvyukov@google.com>2018-04-16 21:52:40 +0200
commitb80fd3b5d429f4d59731a40bf2bcda0c571365e0 (patch)
tree94c03461362f53d5befb1aa915ccad4479e3a7b6 /executor
parent5e6a2eea61587ea617ca9b6677ea1d7181132e12 (diff)
executor: support 32-bit kernels
There is no autodetection yet, but at least the bitness is encapsulated in the cover_t type.
Diffstat (limited to 'executor')
-rw-r--r--executor/executor.h22
-rw-r--r--executor/executor_linux.cc24
2 files changed, 26 insertions, 20 deletions
diff --git a/executor/executor.h b/executor/executor.h
index 915fecb14..88075a11d 100644
--- a/executor/executor.h
+++ b/executor/executor.h
@@ -79,15 +79,18 @@ const uint64 arg_csum_inet = 0;
const uint64 arg_csum_chunk_data = 0;
const uint64 arg_csum_chunk_const = 1;
+// TODO(dvyukov): for 32-bit kernel this needs to be uint32.
+typedef uint64 cover_t;
+
struct thread_t {
bool created;
int id;
osthread_t th;
// TODO(dvyukov): this assumes 64-bit kernel. This must be "kernel long" somehow.
- uint64* cover_data;
+ cover_t* cover_data;
// Pointer to the size of coverage (stored as first word of memory).
- uint64* cover_size_ptr;
- uint64 cover_buffer[1]; // fallback coverage buffer
+ cover_t* cover_size_ptr;
+ cover_t cover_buffer[1]; // fallback coverage buffer
event_t ready;
event_t done;
@@ -101,7 +104,7 @@ struct thread_t {
long args[kMaxArgs];
long res;
uint32 reserrno;
- uint64 cover_size;
+ cover_t cover_size;
bool fault_injected;
int cover_fd;
};
@@ -154,6 +157,7 @@ enum {
};
struct kcov_comparison_t {
+ // Note: comparisons are always 64-bits regardless of kernel bitness.
uint64 type;
uint64 arg1;
uint64 arg2;
@@ -182,7 +186,7 @@ bool copyout(char* addr, uint64 size, uint64* res);
void cover_open();
void cover_enable(thread_t* th);
void cover_reset(thread_t* th);
-uint64 read_cover_size(thread_t* th);
+cover_t read_cover_size(thread_t* th);
static uint32 hash(uint32 a);
static bool dedup(uint32 sig);
@@ -536,10 +540,12 @@ void handle_completion(thread_t* th)
if (flag_collect_comps) {
// Collect only the comparisons
+ // TODO(dvyukov): this is broken for 32-bit kernels.
+ // cover_data is offsetted by cover_t, but kernel always offsetted it by uint64.
uint32 ncomps = th->cover_size;
kcov_comparison_t* start = (kcov_comparison_t*)th->cover_data;
kcov_comparison_t* end = start + ncomps;
- if ((uint64*)end >= th->cover_data + kCoverSize)
+ if ((cover_t*)end >= th->cover_data + kCoverSize)
fail("too many comparisons %u", ncomps);
std::sort(start, end);
ncomps = std::unique(start, end) - start;
@@ -567,8 +573,8 @@ void handle_completion(thread_t* th)
// Write out real coverage (basic block PCs).
cover_size = th->cover_size;
if (flag_dedup_cover) {
- uint64* start = (uint64*)th->cover_data;
- uint64* end = start + cover_size;
+ cover_t* start = th->cover_data;
+ cover_t* end = start + cover_size;
std::sort(start, end);
cover_size = std::unique(start, end) - start;
}
diff --git a/executor/executor_linux.cc b/executor/executor_linux.cc
index b1c0a5e42..070d2bc94 100644
--- a/executor/executor_linux.cc
+++ b/executor/executor_linux.cc
@@ -25,8 +25,8 @@
#include "syscalls_linux.h"
-#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long long)
-#define KCOV_INIT_CMP _IOR('c', 2, unsigned long long)
+#define KCOV_INIT_TRACE _IOR('c', 1, cover_t)
+#define KCOV_INIT_CMP _IOR('c', 2, cover_t)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
@@ -36,8 +36,8 @@ const unsigned long KCOV_TRACE_CMP = 1;
const int kInFd = 3;
const int kOutFd = 4;
-// The address chosen must also work on 32-bit kernels with 2GB user address space.
-void* const kOutputDataAddr = (void*)0x1b9bc20000ull;
+// The address chosen must also work on 32-bit kernels with 1GB user address space.
+void* const kOutputDataAddr = (void*)0x1b2bc20000ull;
uint32* output_data;
uint32* output_pos;
@@ -140,12 +140,12 @@ void cover_open()
if (ioctl(th->cover_fd, KCOV_INIT_TRACE, kCoverSize))
fail("cover init trace write failed");
size_t mmap_alloc_size = kCoverSize * sizeof(th->cover_data[0]);
- uint64* mmap_ptr = (uint64*)mmap(NULL, mmap_alloc_size,
- PROT_READ | PROT_WRITE, MAP_SHARED, th->cover_fd, 0);
+ void* mmap_ptr = mmap(NULL, mmap_alloc_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, th->cover_fd, 0);
if (mmap_ptr == MAP_FAILED)
fail("cover mmap failed");
- th->cover_size_ptr = mmap_ptr;
- th->cover_data = &mmap_ptr[1];
+ th->cover_size_ptr = (cover_t*)mmap_ptr;
+ th->cover_data = &th->cover_size_ptr[1];
}
}
@@ -173,14 +173,14 @@ void cover_reset(thread_t* th)
__atomic_store_n(th->cover_size_ptr, 0, __ATOMIC_RELAXED);
}
-uint64 read_cover_size(thread_t* th)
+cover_t read_cover_size(thread_t* th)
{
if (!flag_cover)
return 0;
- uint64 n = __atomic_load_n(th->cover_size_ptr, __ATOMIC_RELAXED);
- debug("#%d: read cover size = %llu\n", th->id, n);
+ cover_t n = __atomic_load_n(th->cover_size_ptr, __ATOMIC_RELAXED);
+ debug("#%d: read cover size = %llu\n", th->id, (uint64)n);
if (n >= kCoverSize)
- fail("#%d: too much cover %llu", th->id, n);
+ fail("#%d: too much cover %llu", th->id, (uint64)n);
return n;
}