aboutsummaryrefslogtreecommitdiffstats
path: root/pkg
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2021-10-01 14:43:02 +0000
committerAleksandr Nogikh <wp32pw@gmail.com>2021-10-01 17:45:20 +0200
commitdb0f57870523a9bff1629dad1a340ba8aac79d82 (patch)
tree33b82e919cef96856bf952c78c76190885877fdd /pkg
parentcc80db955d0551c2456692da6176530dd27e08ed (diff)
pkg/csource: remove calls instead of skipping them
Currently csource skips calls at the very last moment, which has an unpleasant consequence - if we make the choice of enabled defines depend on the individual calls or call properties, we may end up with defined yet unused functions. The perfect solution would be to untie syz_emit_ethernet/syz_extract_tcp_res and NetInjection, and also to untie VhciInjection and syz_emit_vhci. For the time being, move these checks to the very beginning of csource processing, so that these calls can be removed before we construct our defines. Adjust pkg/csource/csource_test.go to better cover fault injection generation problems.
Diffstat (limited to 'pkg')
-rw-r--r--pkg/csource/csource.go45
-rw-r--r--pkg/csource/csource_test.go8
2 files changed, 35 insertions, 18 deletions
diff --git a/pkg/csource/csource.go b/pkg/csource/csource.go
index 5d0d4efd1..df5b60896 100644
--- a/pkg/csource/csource.go
+++ b/pkg/csource/csource.go
@@ -48,6 +48,7 @@ func Write(p *prog.Prog, opts Options) ([]byte, error) {
calls: make(map[string]uint64),
}
+ ctx.filterCalls()
calls, vars, err := ctx.generateProgCalls(ctx.p, opts.Trace)
if err != nil {
return nil, err
@@ -121,11 +122,37 @@ type context struct {
calls map[string]uint64 // CallName -> NR
}
+// This is a kludge, but we keep it here until a better approach is implemented.
+// TODO: untie syz_emit_ethernet/syz_extract_tcp_res and NetInjection. And also
+// untie VhciInjection and syz_emit_vhci. Then we could remove this method.
+func (ctx *context) filterCalls() {
+ p := ctx.p
+ for i := 0; i < len(p.Calls); {
+ call := p.Calls[i]
+ callName := call.Meta.CallName
+ emitCall := (ctx.opts.NetInjection ||
+ callName != "syz_emit_ethernet" &&
+ callName != "syz_extract_tcp_res") &&
+ (ctx.opts.VhciInjection || callName != "syz_emit_vhci")
+ if emitCall {
+ i++
+ continue
+ }
+ // Remove the call.
+ if ctx.p == p {
+ // We lazily clone the program to avoid unnecessary copying.
+ p = ctx.p.Clone()
+ }
+ p.RemoveCall(i)
+ }
+ ctx.p = p
+}
+
func (ctx *context) generateSyscalls(calls []string, hasVars bool) string {
opts := ctx.opts
buf := new(bytes.Buffer)
if !opts.Threaded && !opts.Collide {
- if hasVars || opts.Trace {
+ if len(calls) > 0 && (hasVars || opts.Trace) {
fmt.Fprintf(buf, "\tintptr_t res = 0;\n")
}
if opts.Repro {
@@ -137,7 +164,7 @@ func (ctx *context) generateSyscalls(calls []string, hasVars bool) string {
for _, c := range calls {
fmt.Fprintf(buf, "%s", c)
}
- } else {
+ } else if len(calls) > 0 {
if hasVars || opts.Trace {
fmt.Fprintf(buf, "\tintptr_t res = 0;\n")
}
@@ -207,20 +234,10 @@ func (ctx *context) generateCalls(p prog.ExecProg, trace bool) ([]string, []uint
fmt.Fprintf(w, "\tinject_fault(%v);\n", call.Props.FailNth)
}
// Call itself.
- callName := call.Meta.CallName
resCopyout := call.Index != prog.ExecNoCopyout
argCopyout := len(call.Copyout) != 0
- emitCall := (ctx.opts.NetInjection ||
- callName != "syz_emit_ethernet" &&
- callName != "syz_extract_tcp_res") &&
- (ctx.opts.VhciInjection || callName != "syz_emit_vhci")
- // TODO: if we don't emit the call we must also not emit copyin, copyout and fault injection.
- // However, simply skipping whole iteration breaks tests due to unused static functions.
- if emitCall {
- ctx.emitCall(w, call, ci, resCopyout || argCopyout, trace)
- } else if trace {
- fmt.Fprintf(w, "\t(void)res;\n")
- }
+
+ ctx.emitCall(w, call, ci, resCopyout || argCopyout, trace)
// Copyout.
if resCopyout || argCopyout {
diff --git a/pkg/csource/csource_test.go b/pkg/csource/csource_test.go
index ebf22d5f9..aa242e876 100644
--- a/pkg/csource/csource_test.go
+++ b/pkg/csource/csource_test.go
@@ -58,10 +58,6 @@ func testTarget(t *testing.T, target *prog.Target, full bool) {
// Testing 2 programs takes too long since we have lots of options permutations and OS/arch.
// So we use the as-is in short tests and minimized version in full tests.
syzProg := target.GenerateAllSyzProg(rs)
- if len(syzProg.Calls) > 0 {
- // Test fault injection generation as well.
- p.Calls[0].Props.FailNth = 1
- }
var opts []Options
if !full || testing.Short() {
p.Calls = append(p.Calls, syzProg.Calls...)
@@ -74,6 +70,10 @@ func testTarget(t *testing.T, target *prog.Target, full bool) {
p.Calls = append(p.Calls, minimized.Calls...)
opts = allOptionsPermutations(target.OS)
}
+ if len(p.Calls) > 0 {
+ // Test fault injection code generation as well.
+ p.Calls[0].Props.FailNth = 1
+ }
for opti, opts := range opts {
if testing.Short() && opts.HandleSegv {
// HandleSegv can radically increase compilation time/memory consumption on large programs.