diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2021-12-03 13:58:21 +0000 |
|---|---|---|
| committer | Aleksandr Nogikh <wp32pw@gmail.com> | 2021-12-03 18:20:11 +0100 |
| commit | a617004c2317ce7443e2fff7415ddab9ac765afc (patch) | |
| tree | 93e1acdcbf47ebab69eb573cca4e4e93b40f181e /executor/executor_linux.h | |
| parent | c7c20675f58e3edaa53538928c0963144fd524e5 (diff) | |
executor: delay kcov mmap until it is needed
The previous strategy (delay kcov instance creation) seems not to work
very well in carefully sandboxed environments. Let's see if the new
approach is more versatile.
Open a kcov handle for each thread at syz-executor's initialization, but
don't mmap it right away.
Diffstat (limited to 'executor/executor_linux.h')
| -rw-r--r-- | executor/executor_linux.h | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/executor/executor_linux.h b/executor/executor_linux.h index 7d2780c0f..8666d929b 100644 --- a/executor/executor_linux.h +++ b/executor/executor_linux.h @@ -85,14 +85,7 @@ static void cover_open(cover_t* cov, bool extra) const int cover_size = extra ? kExtraCoverSize : kCoverSize; if (ioctl(cov->fd, kcov_init_trace, cover_size)) fail("cover init trace write failed"); - size_t mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4); - cov->data = (char*)mmap(NULL, mmap_alloc_size, - PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0); - if (cov->data == MAP_FAILED) - fail("cover mmap failed"); - cov->data_end = cov->data + mmap_alloc_size; - cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t); - cov->pc_offset = 0; + cov->mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4); } static void cover_protect(cover_t* cov) @@ -103,6 +96,21 @@ static void cover_unprotect(cover_t* cov) { } +static void cover_mmap(cover_t* cov) +{ + if (cov->data != NULL) + fail("cover_mmap invoked on an already mmapped cover_t object"); + if (cov->mmap_alloc_size == 0) + fail("cover_t structure is corrupted"); + cov->data = (char*)mmap(NULL, cov->mmap_alloc_size, + PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0); + if (cov->data == MAP_FAILED) + exitf("cover mmap failed"); + cov->data_end = cov->data + cov->mmap_alloc_size; + cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t); + cov->pc_offset = 0; +} + static void cover_enable(cover_t* cov, bool collect_comps, bool extra) { unsigned int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC; @@ -147,14 +155,6 @@ static void cover_collect(cover_t* cov) cov->size = *(uint32*)cov->data; } -static void cover_reserve_fd(cover_t* cov) -{ - int fd = open("/dev/null", O_RDONLY); - if (fd < 0) - fail("failed to open /dev/null"); - dup2(fd, cov->fd); -} - static bool use_cover_edges(uint32 pc) { return true; |
