about summary refs log tree commit diff stats
path: root/pkg/csource/csource.go
diff options
context:
space:
mode:
author: Dmitry Vyukov <dvyukov@google.com> 2020-07-13 16:24:56 +0200
committer: Dmitry Vyukov <dvyukov@google.com> 2020-07-15 09:26:23 +0200
commit: b458f2c1a61c78e2004be6b4ef60b45fb81bd684 (patch)
tree9be99ed7ae69bb21d00abe32a32a0886fd1d53af /pkg/csource/csource.go
parent6b24f02a1122b986a5778bcb442ebabed406eeb1 (diff)
executor: wrap all syscalls into NONFAILING
Currently we sprinkle NONFAILING all over pseudo-syscall code, around all individual accesses to fuzzer-generated pointers. This is tedious manual work and subject to errors. Wrap execute_syscall invocation with NONFAILING in execute_call once instead. Then we can remove NONFAILING from all pseudo-syscalls and never get back to this. Potential downsides: (1) this is coarser-grained and we will skip whole syscall on invalid pointer, but this is how normal syscalls work as well, so should not be a problem; (2) we will skip any clean up (closing of files, etc) as well; but this may be fine as well (programs can perfectly leave open file descriptors as well). Update #1918
Diffstat (limited to 'pkg/csource/csource.go')
-rw-r--r-- pkg/csource/csource.go | 79
1 file changed, 47 insertions(+), 32 deletions(-)
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go
index e679468df..9cd456533 100644
--- a/pkg/csource/csource.go
+++ b/pkg/csource/csource.go
@@ -212,40 +212,24 @@ func (ctx *context) emitCall(w *bytes.Buffer, call prog.ExecCall, ci int, haveCo
_, trampoline := ctx.sysTarget.SyscallTrampolines[callName]
native := ctx.sysTarget.SyscallNumbers && !strings.HasPrefix(callName, "syz_") && !trampoline
fmt.Fprintf(w, "\t")
- if haveCopyout || trace {
- fmt.Fprintf(w, "res = ")
- }
- ctx.emitCallName(w, call, native)
- for ai, arg := range call.Args {
- if native || ai > 0 {
- fmt.Fprintf(w, ", ")
+ if !native {
+ // This mimics the same as executor does for execute_syscall,
+ // but only for non-native syscalls to reduce clutter (native syscalls are assumed to not crash).
+ // Arrange for res = -1 in case of syscall abort, we care about errno only if we are tracing for pkg/runtest.
+ if haveCopyout || trace {
+ fmt.Fprintf(w, "res = -1;\n\t")
}
- switch arg := arg.(type) {
- case prog.ExecArgConst:
- if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
- panic("sring format in syscall argument")
- }
- fmt.Fprintf(w, "%v", ctx.constArgToStr(arg, true, native))
- case prog.ExecArgResult:
- if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
- panic("sring format in syscall argument")
- }
- val := ctx.resultArgToStr(arg)
- if native && ctx.target.PtrSize == 4 {
- // syscall accepts args as ellipsis, resources are uint64
- // and take 2 slots without the cast, which would be wrong.
- val = "(intptr_t)" + val
- }
- fmt.Fprintf(w, "%v", val)
- default:
- panic(fmt.Sprintf("unknown arg type: %+v", arg))
+ if trace {
+ fmt.Fprintf(w, "errno = EFAULT;\n\t")
}
+ fmt.Fprintf(w, "NONFAILING(")
}
- for i := 0; i < call.Meta.MissingArgs; i++ {
- if native || len(call.Args) != 0 {
- fmt.Fprintf(w, ", ")
- }
- fmt.Fprintf(w, "0")
+ if haveCopyout || trace {
+ fmt.Fprintf(w, "res = ")
+ }
+ ctx.emitCallBody(w, call, native)
+ if !native {
+ fmt.Fprintf(w, ")") // close NONFAILING macro
}
fmt.Fprintf(w, ");")
comment := ctx.target.AnnotateCall(call)
@@ -264,7 +248,7 @@ func (ctx *context) emitCall(w *bytes.Buffer, call prog.ExecCall, ci int, haveCo
}
}
-func (ctx *context) emitCallName(w *bytes.Buffer, call prog.ExecCall, native bool) {
+func (ctx *context) emitCallBody(w *bytes.Buffer, call prog.ExecCall, native bool) {
callName, ok := ctx.sysTarget.SyscallTrampolines[call.Meta.CallName]
if !ok {
callName = call.Meta.CallName
@@ -280,6 +264,37 @@ func (ctx *context) emitCallName(w *bytes.Buffer, call prog.ExecCall, native boo
}
fmt.Fprintf(w, "((intptr_t(*)(%v))CAST(%v))(", args, callName)
}
+ for ai, arg := range call.Args {
+ if native || ai > 0 {
+ fmt.Fprintf(w, ", ")
+ }
+ switch arg := arg.(type) {
+ case prog.ExecArgConst:
+ if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
+ panic("sring format in syscall argument")
+ }
+ fmt.Fprintf(w, "%v", ctx.constArgToStr(arg, true, native))
+ case prog.ExecArgResult:
+ if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
+ panic("sring format in syscall argument")
+ }
+ val := ctx.resultArgToStr(arg)
+ if native && ctx.target.PtrSize == 4 {
+ // syscall accepts args as ellipsis, resources are uint64
+ // and take 2 slots without the cast, which would be wrong.
+ val = "(intptr_t)" + val
+ }
+ fmt.Fprintf(w, "%v", val)
+ default:
+ panic(fmt.Sprintf("unknown arg type: %+v", arg))
+ }
+ }
+ for i := 0; i < call.Meta.MissingArgs; i++ {
+ if native || len(call.Args) != 0 {
+ fmt.Fprintf(w, ", ")
+ }
+ fmt.Fprintf(w, "0")
+ }
}
func (ctx *context) generateCsumInet(w *bytes.Buffer, addr uint64, arg prog.ExecArgCsum, csumSeq int) {