1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
|
// Copyright 2021 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
// Package main implements the syz-verifier tool. High-level documentation
// can be found in docs/syz_verifier.md.
package main
import (
"flag"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"time"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/mgrconfig"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/pkg/tool"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/vm"
)
const (
	// maxResultReports bounds the number of result reports produced by the
	// verifier. NOTE(review): the constant is not referenced in this chunk —
	// confirm its exact use elsewhere in the package.
	maxResultReports = 100
)
// poolInfo contains kernel-specific information for spawning virtual machines
// and reporting crashes. It also keeps track of the Runners executing on
// spawned VMs, what programs have been sent to each Runner and what programs
// have yet to be sent on any of the Runners.
type poolInfo struct {
	// cfg is the manager configuration this kernel was loaded from
	// (see mgrconfig.LoadFile in main).
	cfg *mgrconfig.Config
	// pool spawns the VM instances for this kernel (see vm.Create in main).
	pool *vm.Pool
	// Reporter is created from cfg (report.NewReporter in main) and is
	// presumably used to parse this kernel's crash output — confirm at the
	// call sites.
	Reporter *report.Reporter
	// runners keeps track of what programs have been sent to each Runner.
	// There is one Runner executing per VM instance.
	runners map[int]runnerProgs
	// progs stores the programs that haven't been sent to this kernel yet but
	// have been sent to at least one other kernel.
	progs []*progInfo
	// toRerun stores the programs that still need to be rerun by this kernel.
	toRerun []*progInfo
	// checked is set to true when the set of system calls not supported on the
	// kernel is known.
	checked bool
}
// progInfo bundles a program with the state the verifier keeps while the
// program is being executed and compared across kernels.
type progInfo struct {
	// prog is the parsed program.
	prog *prog.Prog
	// idx identifies the program; presumably unique per program — confirm
	// with the code that assigns it.
	idx int
	// serialized is the serialized form of prog, assumedly what is sent to
	// the Runners — TODO confirm.
	serialized []byte
	// res holds the execution results. NOTE(review): the meaning of the two
	// dimensions (likely rerun x kernel) is not visible in this chunk —
	// confirm before relying on it.
	res [][]*ExecResult
	// received stores the number of results received for this program.
	received int
	// runIdx is presumably the index of the current (re)run — confirm.
	runIdx int
	// report stores the result report produced for this program, if any.
	report *ResultReport
}
type runnerProgs map[int]*progInfo
// main runs the syz-verifier tool: it parses the command-line flags, loads
// and cross-checks the per-kernel configurations, prepares the working
// directories and the stats destination, then creates the Verifier, starts
// its RPC server and boots the VM instances.
func main() {
	var cfgs tool.CfgsFlag
	// Fixed typo: "comma-sepatated" -> "comma-separated".
	flag.Var(&cfgs, "configs", "[MANDATORY] list of at least two kernel-specific comma-separated configuration files")
	flagDebug := flag.Bool("debug", false, "dump all VM output to console")
	// Fixed missing space between the concatenated halves of the help string
	// (previously rendered as "whenexecution").
	flagStats := flag.String("stats", "", "where stats will be written when "+
		"execution of syz-verifier finishes, defaults to stdout")
	flagEnv := flag.Bool("new-env", true, "create a new environment for each program")
	flagReruns := flag.Int("rerun", 3, "number of times a program is rerun when a mismatch is found")
	flag.Parse()

	pools := createPools(cfgs, *flagDebug)
	// Differential verification needs at least two kernels to compare.
	if len(pools) < 2 {
		flag.Usage()
		os.Exit(1)
	}
	checkConfigsMatch(pools)

	cfg := pools[0].cfg
	workdir, target, sysTarget, addr := cfg.Workdir, cfg.Target, cfg.SysTarget, cfg.RPC

	exe := sysTarget.ExeExtension
	runnerBin := filepath.Join(cfg.Syzkaller, "bin", target.OS+"_"+target.Arch, "syz-runner"+exe)
	if !osutil.IsExist(runnerBin) {
		log.Fatalf("bad syzkaller config: can't find %v", runnerBin)
	}
	execBin := cfg.ExecutorBin
	if !osutil.IsExist(execBin) {
		log.Fatalf("bad syzkaller config: can't find %v", execBin)
	}

	// One per-kernel subdirectory under both workdir and crashdir.
	crashdir := filepath.Join(workdir, "crashes")
	osutil.MkdirAll(crashdir)
	for idx := range pools {
		targetPath := target.OS + "-" + target.Arch + "-" + strconv.Itoa(idx)
		osutil.MkdirAll(filepath.Join(workdir, targetPath))
		osutil.MkdirAll(filepath.Join(crashdir, targetPath))
	}
	resultsdir := filepath.Join(workdir, "results")
	osutil.MkdirAll(resultsdir)

	sw := statsWriter(workdir, *flagStats)

	for idx, pi := range pools {
		var err error
		pi.Reporter, err = report.NewReporter(pi.cfg)
		if err != nil {
			log.Fatalf("failed to create reporter for instance-%d: %v", idx, err)
		}
		pi.runners = make(map[int]runnerProgs)
	}

	// Only the syscalls enabled in the reference config are verified.
	calls := make(map[*prog.Syscall]bool)
	for _, id := range cfg.Syscalls {
		calls[target.Syscalls[id]] = true
	}

	vrf := &Verifier{
		workdir:     workdir,
		crashdir:    crashdir,
		resultsdir:  resultsdir,
		pools:       pools,
		target:      target,
		calls:       calls,
		reasons:     make(map[*prog.Syscall]string),
		rnd:         rand.New(rand.NewSource(time.Now().UnixNano() + 1e12)),
		runnerBin:   runnerBin,
		executorBin: execBin,
		addr:        addr,
		// Reasons for disabled syscalls are only reported when the config
		// explicitly enables/disables syscalls.
		reportReasons: len(cfg.EnabledSyscalls) != 0 || len(cfg.DisabledSyscalls) != 0,
		statsWrite:    sw,
		newEnv:        *flagEnv,
		reruns:        *flagReruns,
	}

	srv, err := startRPCServer(vrf)
	if err != nil {
		log.Fatalf("failed to initialise RPC server: %v", err)
	}
	vrf.srv = srv
	vrf.startInstances()
}

// createPools loads every kernel configuration file and creates the
// corresponding VM pool, keyed by the position of the config on the command
// line. Any error is fatal.
func createPools(cfgs tool.CfgsFlag, debug bool) map[int]*poolInfo {
	pools := make(map[int]*poolInfo)
	for idx, cfg := range cfgs {
		pi := &poolInfo{}
		var err error
		pi.cfg, err = mgrconfig.LoadFile(cfg)
		if err != nil {
			log.Fatalf("%v", err)
		}
		pi.pool, err = vm.Create(pi.cfg, debug)
		if err != nil {
			log.Fatalf("%v", err)
		}
		pools[idx] = pi
	}
	return pools
}

// checkConfigsMatch verifies that the configuration options which must be
// identical for all kernels match the reference configuration (pools[0]).
// Any mismatch is fatal.
// TODO: pass the configurations that should be the same for all kernels in a
// default config file in order to avoid these checks and add testing.
func checkConfigsMatch(pools map[int]*poolInfo) {
	base := pools[0].cfg
	for idx := 1; idx < len(pools); idx++ {
		cfg := pools[idx].cfg
		switch {
		case cfg.Workdir != base.Workdir:
			log.Fatalf("working directory mismatch")
		case cfg.Target != base.Target:
			log.Fatalf("target mismatch")
		case cfg.SysTarget != base.SysTarget:
			log.Fatalf("system target mismatch")
		case cfg.RPC != base.RPC:
			log.Fatalf("tcp address mismatch")
		}
	}
}

// statsWriter returns the destination for the final stats: stdout when no
// file name was given, otherwise a newly created file inside workdir.
// Failure to create the file is fatal.
func statsWriter(workdir, name string) io.Writer {
	if name == "" {
		return os.Stdout
	}
	f, err := os.Create(filepath.Join(workdir, name))
	if err != nil {
		log.Fatalf("failed to create stats output file: %v", err)
	}
	return f
}
|