about summary refs log tree commit diff stats
path: root/executor/executor_bsd.h
diff options
context:
space:
mode:
authorAndrew Turner <andrew@fubar.geek.nz>2019-06-03 13:19:51 +0000
committerDmitry Vyukov <dvyukov@google.com>2019-06-04 16:24:36 +0200
commitbfb4a51e30c8c04658a2675333b9b89a9d327c4a (patch)
tree7a0bd32ab317f8652e12bed660c84eb97984c693 /executor/executor_bsd.h
parentad87cdf3c743711389a67930d722e89c127ab1e5 (diff)
executor: Protect the coverage buffer
Add functions to protect and unprotect the coverage buffer. The buffer is protected from being written to while tracing. When the trace data is sorted we need to make it read/write, but can return it to read only after this has completed. Leave the first page as read/write as we need to clear the length field.
Diffstat (limited to 'executor/executor_bsd.h')
-rw-r--r-- executor/executor_bsd.h | 19
1 file changed, 19 insertions, 0 deletions
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index a642d91e1..45d0cfe84 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -80,6 +80,25 @@ static void cover_open(cover_t* cov, bool extra)
cov->data_end = cov->data + mmap_alloc_size;
}
+// Make the kcov coverage buffer read-only so the fuzzed program cannot
+// scribble over collected trace data while tracing is in progress.
+// Per the commit message, the first page is deliberately left read/write
+// because the length field at the start of the buffer must stay writable.
+// Only implemented for FreeBSD; a no-op on the other BSDs.
+static void cover_protect(cover_t* cov)
+{
+#if GOOS_freebsd
+ size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
+ long page_size = sysconf(_SC_PAGESIZE);
+ // Skip the first page (kept read/write for the length field) and mark
+ // the remainder of the mapping read-only.
+ // NOTE(review): the mprotect() return value is ignored — on failure the
+ // buffer silently stays writable; consider checking/logging the result.
+ if (page_size > 0)
+ mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+ PROT_READ);
+#endif
+}
+
+// Restore read/write access to the entire coverage buffer. Needed while
+// the trace data is being sorted in place; the buffer can be re-protected
+// with cover_protect() once sorting completes.
+// Only implemented for FreeBSD; a no-op on the other BSDs.
+static void cover_unprotect(cover_t* cov)
+{
+#if GOOS_freebsd
+ size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
+ // NOTE(review): mprotect() return value is ignored here as well — a
+ // failure would leave the buffer read-only and later writes would fault.
+ mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+#endif
+}
+
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
int kcov_mode = collect_comps ? KCOV_MODE_TRACE_CMP : KCOV_MODE_TRACE_PC;