aboutsummaryrefslogtreecommitdiffstats
path: root/executor
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2019-04-02 13:43:46 +0200
committerDmitry Vyukov <dvyukov@google.com>2019-04-02 13:43:46 +0200
commitdfd3394d42ddd333c68cf355273b312da8c65a51 (patch)
tree5b1c9a9cd40ca5c5c7226de2824fd5015d7e0820 /executor
parent3f57b235da53aa226b61483f5f4f350df45793c7 (diff)
executor: try to prevent machine outbreak
The fuzzer gained control over host machines again with something like: syz_execute_func(&(0x7f00000000c0)="c4827d5a6e0d5e57c3c3b7d95a91914e424a2664f0ff065b460f343030062e67660f50e900004681e400000100440fe531feabc4aba39d6c450754ddea420fae9972b571112d02") Let's see if perturbing syz_execute_func a bit and wiping registers will stop the outbreak.
Diffstat (limited to 'executor')
-rw-r--r--executor/common.h14
1 file changed, 13 insertions, 1 deletion
diff --git a/executor/common.h b/executor/common.h
index 9531d4a3e..a0ed0026c 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -383,7 +383,19 @@ static uint16 csum_inet_digest(struct csum_inet* csum)
// syz_execute_func(text ptr[in, text[target]])
static long syz_execute_func(volatile long text)
{
- ((void (*)(void))(text))();
+ // Here we just jump to random code, which is inherently unsafe.
+ // But we only care about coverage in the output region.
+ // The following code tries to remove left-over pointers in registers
+ // from the reach of the random code, otherwise it's known to reach
+ // the output region somehow. The asm block is arch-independent except
+ // for the number of available registers.
+ volatile long p[8] = {0};
+ (void)p;
+#if GOARCH_amd64
+ asm volatile("" ::"r"(0l), "r"(1l), "r"(2l), "r"(3l), "r"(4l), "r"(5l), "r"(6l),
+ "r"(7l), "r"(8l), "r"(9l), "r"(10l), "r"(11l), "r"(12l), "r"(13l));
+#endif
+ NONFAILING(((void (*)(void))(text))());
return 0;
}
#endif