aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-06-02 20:09:00 +0200
committerDmitry Vyukov <dvyukov@google.com>2017-06-03 11:31:42 +0200
commitaf643baa328ae3d4b7076054bba648c4b8bf8056 (patch)
tree6e4687c745b63352dec21f6ac2a6a7d8fa1201c4
parent96b8d4e99c7812f91633ea6cd1aee5867965e742 (diff)
vm: overhaul
VM infrastructure currently has several problems: - the Config struct is a complete mess with a superset of params for all VM types - verification of Config is a mess spread across several places - there is no place where VM code could do global initialization like creating a GCE connection, uploading a GCE image to GCS, matching adb devices with consoles, etc - it is hard to add private VM implementations; such an impl would need to add code to the config package, which would lead to constant merge conflicts - the interface for VM implementations is mixed with the interface for VM users; this does not allow providing the best interface for both of them - there is no way to add common code for all VM implementations This change solves these problems by: - splitting the VM interface for users (vm package) and the VM interface for VM implementations (vmimpl package); this in turn allows adding common code - adding a Pool concept that allows doing global initialization and config checking at the right time - decoupling the manager config from VM-specific configs; each VM type now defines its own config Note: manager configs need to be changed after this change: VM-specific parts are moved to their own "vm" subobject. Note: this change also drops the "local" VM type. Its story was long unclear, and there is now syz-stress which solves the same problem.
-rw-r--r--pkg/config/config.go6
-rw-r--r--pkg/config/config_test.go8
-rw-r--r--pkg/osutil/osutil.go35
-rw-r--r--repro/repro.go30
-rw-r--r--syz-gce/syz-gce.go13
-rw-r--r--syz-manager/config/config.go206
-rw-r--r--syz-manager/manager.go74
-rw-r--r--tools/syz-crush/crush.go40
-rw-r--r--tools/syz-repro/repro.go33
-rw-r--r--vm/adb/adb.go120
-rw-r--r--vm/gce/gce.go122
-rw-r--r--vm/kvm/kvm.go123
-rw-r--r--vm/local/local.go151
-rw-r--r--vm/odroid/odroid.go133
-rw-r--r--vm/qemu/qemu.go204
-rw-r--r--vm/vm.go150
-rw-r--r--vm/vmimpl/console.go (renamed from vm/console.go)5
-rw-r--r--vm/vmimpl/merger.go (renamed from vm/merger.go)2
-rw-r--r--vm/vmimpl/merger_test.go (renamed from vm/merger_test.go)8
-rw-r--r--vm/vmimpl/util.go19
-rw-r--r--vm/vmimpl/vmimpl.go77
21 files changed, 787 insertions, 772 deletions
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 461ebdabc..03d4b283e 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -11,7 +11,7 @@ import (
"strings"
)
-func Load(filename string, cfg interface{}) error {
+func LoadFile(filename string, cfg interface{}) error {
if filename == "" {
return fmt.Errorf("no config file specified")
}
@@ -19,10 +19,10 @@ func Load(filename string, cfg interface{}) error {
if err != nil {
return fmt.Errorf("failed to read config file: %v", err)
}
- return load(data, cfg)
+ return LoadData(data, cfg)
}
-func load(data []byte, cfg interface{}) error {
+func LoadData(data []byte, cfg interface{}) error {
if err := checkUnknownFields(data, reflect.ValueOf(cfg).Type()); err != nil {
return err
}
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index e36dd9ee9..79836f39e 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -161,7 +161,7 @@ func TestLoad(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
var cfg Config
- err := load([]byte(test.input), &cfg)
+ err := LoadData([]byte(test.input), &cfg)
errStr := ""
if err != nil {
errStr = err.Error()
@@ -178,15 +178,15 @@ func TestLoad(t *testing.T) {
func TestLoadBadType(t *testing.T) {
want := "config type is not pointer to struct"
- if err := load([]byte("{}"), 1); err == nil || err.Error() != want {
+ if err := LoadData([]byte("{}"), 1); err == nil || err.Error() != want {
t.Fatalf("got '%v', want '%v'", err, want)
}
i := 0
- if err := load([]byte("{}"), &i); err == nil || err.Error() != want {
+ if err := LoadData([]byte("{}"), &i); err == nil || err.Error() != want {
t.Fatalf("got '%v', want '%v'", err, want)
}
s := struct{}{}
- if err := load([]byte("{}"), s); err == nil || err.Error() != want {
+ if err := LoadData([]byte("{}"), s); err == nil || err.Error() != want {
t.Fatalf("got '%v', want '%v'", err, want)
}
}
diff --git a/pkg/osutil/osutil.go b/pkg/osutil/osutil.go
index 0bd4cdfac..3f42dbc12 100644
--- a/pkg/osutil/osutil.go
+++ b/pkg/osutil/osutil.go
@@ -6,7 +6,11 @@ package osutil
import (
"bytes"
"fmt"
+ "io"
+ "os"
"os/exec"
+ "path/filepath"
+ "syscall"
"time"
)
@@ -34,3 +38,34 @@ func RunCmd(timeout time.Duration, dir, bin string, args ...string) ([]byte, err
}
return output.Bytes(), nil
}
+
+func LongPipe() (io.ReadCloser, io.WriteCloser, error) {
+ r, w, err := os.Pipe()
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create pipe: %v", err)
+ }
+ for sz := 128 << 10; sz <= 2<<20; sz *= 2 {
+ syscall.Syscall(syscall.SYS_FCNTL, w.Fd(), syscall.F_SETPIPE_SZ, uintptr(sz))
+ }
+ return r, w, err
+}
+
+var wd string
+
+func init() {
+ var err error
+ wd, err = os.Getwd()
+ if err != nil {
+ panic(fmt.Sprintf("failed to get wd: %v", err))
+ }
+}
+
+func Abs(path string) string {
+ if wd1, err := os.Getwd(); err == nil && wd1 != wd {
+ panic("don't mess with wd in a concurrent program")
+ }
+ if path == "" || filepath.IsAbs(path) {
+ return path
+ }
+ return filepath.Join(wd, path)
+}
diff --git a/repro/repro.go b/repro/repro.go
index 1cdd3efbe..c2f9e52a7 100644
--- a/repro/repro.go
+++ b/repro/repro.go
@@ -34,19 +34,16 @@ type context struct {
}
type instance struct {
- vm.Instance
+ *vm.Instance
index int
execprogBin string
executorBin string
}
-func Run(crashLog []byte, cfg *config.Config, vmIndexes []int) (*Result, error) {
+func Run(crashLog []byte, cfg *config.Config, vmPool *vm.Pool, vmIndexes []int) (*Result, error) {
if len(vmIndexes) == 0 {
return nil, fmt.Errorf("no VMs provided")
}
- if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-execprog")); err != nil {
- return nil, fmt.Errorf("bin/syz-execprog is missing (run 'make execprog')")
- }
entries := prog.ParseLog(crashLog)
if len(entries) == 0 {
return nil, fmt.Errorf("crash log does not contain any programs")
@@ -80,13 +77,7 @@ func Run(crashLog []byte, cfg *config.Config, vmIndexes []int) (*Result, error)
continue
default:
}
- vmCfg, err := config.CreateVMConfig(cfg, vmIndex)
- if err != nil {
- Logf(0, "reproducing crash '%v': failed to create VM config: %v", crashDesc, err)
- time.Sleep(10 * time.Second)
- continue
- }
- vmInst, err := vm.Create(cfg.Type, vmCfg)
+ vmInst, err := vmPool.Create(vmIndex)
if err != nil {
Logf(0, "reproducing crash '%v': failed to create VM: %v", crashDesc, err)
time.Sleep(10 * time.Second)
@@ -107,7 +98,12 @@ func Run(crashLog []byte, cfg *config.Config, vmIndexes []int) (*Result, error)
time.Sleep(10 * time.Second)
continue
}
- inst = &instance{vmInst, vmIndex, execprogBin, executorBin}
+ inst = &instance{
+ Instance: vmInst,
+ index: vmIndex,
+ execprogBin: execprogBin,
+ executorBin: executorBin,
+ }
break
}
if inst == nil {
@@ -318,7 +314,7 @@ func (ctx *context) testProg(p *prog.Prog, duration time.Duration, opts csource.
inst.execprogBin, inst.executorBin, opts.Procs, repeat, opts.Sandbox, opts.Threaded, opts.Collide, opts.FaultCall, opts.FaultNth, vmProgFile)
Logf(2, "reproducing crash '%v': testing program (duration=%v, %+v): %s",
ctx.crashDesc, duration, opts, p)
- return ctx.testImpl(inst, command, duration)
+ return ctx.testImpl(inst.Instance, command, duration)
}
func (ctx *context) testBin(bin string, duration time.Duration) (crashed bool, err error) {
@@ -333,15 +329,15 @@ func (ctx *context) testBin(bin string, duration time.Duration) (crashed bool, e
return false, fmt.Errorf("failed to copy to VM: %v", err)
}
Logf(2, "reproducing crash '%v': testing compiled C program", ctx.crashDesc)
- return ctx.testImpl(inst, bin, duration)
+ return ctx.testImpl(inst.Instance, bin, duration)
}
-func (ctx *context) testImpl(inst vm.Instance, command string, duration time.Duration) (crashed bool, err error) {
+func (ctx *context) testImpl(inst *vm.Instance, command string, duration time.Duration) (crashed bool, err error) {
outc, errc, err := inst.Run(duration, nil, command)
if err != nil {
return false, fmt.Errorf("failed to run command in VM: %v", err)
}
- desc, text, output, crashed, timedout := vm.MonitorExecution(outc, errc, false, false, ctx.cfg.ParsedIgnores)
+ desc, text, output, crashed, timedout := vm.MonitorExecution(outc, errc, false, ctx.cfg.ParsedIgnores)
_, _, _ = text, output, timedout
if !crashed {
Logf(2, "reproducing crash '%v': program did not crash", ctx.crashDesc)
diff --git a/syz-gce/syz-gce.go b/syz-gce/syz-gce.go
index fcf9c86cd..0fc39f650 100644
--- a/syz-gce/syz-gce.go
+++ b/syz-gce/syz-gce.go
@@ -91,7 +91,7 @@ func main() {
cfg = &Config{
Use_Dashboard_Patches: true,
}
- if err := pkgconfig.Load(*flagConfig, cfg); err != nil {
+ if err := pkgconfig.LoadFile(*flagConfig, cfg); err != nil {
Fatalf("failed to load config file: %v", err)
}
EnableLogCaching(1000, 1<<20)
@@ -466,6 +466,10 @@ func writeManagerConfig(cfg *Config, httpPort int, file string) error {
if len(tag) != 0 && tag[len(tag)-1] == '\n' {
tag = tag[:len(tag)-1]
}
+ sshKey := ""
+ if _, err := os.Stat("image/key"); err == nil {
+ sshKey = "image/key"
+ }
managerCfg := &config.Config{
Name: cfg.Name,
Hub_Addr: cfg.Hub_Addr,
@@ -479,8 +483,6 @@ func writeManagerConfig(cfg *Config, httpPort int, file string) error {
Tag: string(tag),
Syzkaller: "gopath/src/github.com/google/syzkaller",
Type: "gce",
- Machine_Type: cfg.Machine_Type,
- Count: cfg.Machine_Count,
Image: cfg.Image_Name,
Sandbox: cfg.Sandbox,
Procs: cfg.Procs,
@@ -488,9 +490,8 @@ func writeManagerConfig(cfg *Config, httpPort int, file string) error {
Disable_Syscalls: cfg.Disable_Syscalls,
Cover: true,
Reproduce: true,
- }
- if _, err := os.Stat("image/key"); err == nil {
- managerCfg.Sshkey = "image/key"
+ VM: []byte(fmt.Sprintf(`{"count": %v, "machine_type": %q, "sshkey": %q}`,
+ cfg.Machine_Count, cfg.Machine_Type, sshKey)),
}
data, err := json.MarshalIndent(managerCfg, "", "\t")
if err != nil {
diff --git a/syz-manager/config/config.go b/syz-manager/config/config.go
index 16c8143fc..495daba09 100644
--- a/syz-manager/config/config.go
+++ b/syz-manager/config/config.go
@@ -4,6 +4,7 @@
package config
import (
+ "encoding/json"
"fmt"
"os"
"path/filepath"
@@ -11,29 +12,19 @@ import (
"strings"
pkgconfig "github.com/google/syzkaller/pkg/config"
- "github.com/google/syzkaller/pkg/fileutil"
+ "github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/vm"
)
type Config struct {
- Name string // Instance name (used for identification and as GCE instance prefix)
- Http string // TCP address to serve HTTP stats page (e.g. "localhost:50000")
- Rpc string // TCP address to serve RPC for fuzzer processes (optional, only useful for type "none")
- Workdir string
- Vmlinux string
- Kernel string // e.g. arch/x86/boot/bzImage
- Tag string // arbitrary optional tag that is saved along with crash reports (e.g. kernel branch/commit)
- Cmdline string // kernel command line
- Image string // linux image for VMs
- Initrd string // linux initial ramdisk. (optional)
- Cpu int // number of VM CPUs
- Mem int // amount of VM memory in MBs
- Sshkey string // root ssh key for the image
- Bin string // qemu/lkvm binary name
- Bin_Args string // additional command line arguments for qemu/lkvm binary
- Debug bool // dump all VM output to console
- Output string // one of stdout/dmesg/file (useful only for local VM)
+ Name string // Instance name (used for identification and as GCE instance prefix)
+ Http string // TCP address to serve HTTP stats page (e.g. "localhost:50000")
+ Rpc string // TCP address to serve RPC for fuzzer processes (optional)
+ Workdir string
+ Vmlinux string
+ Tag string // arbitrary optional tag that is saved along with crash reports (e.g. branch/commit)
+ Image string // linux image for VMs
+ Output string // one of stdout/dmesg/file (useful only for local VM)
Hub_Addr string
Hub_Key string
@@ -41,11 +32,8 @@ type Config struct {
Dashboard_Addr string
Dashboard_Key string
- Syzkaller string // path to syzkaller checkout (syz-manager will look for binaries in bin subdir)
- Type string // VM type (qemu, kvm, local)
- Count int // number of VMs (don't secify for adb, instead specify devices)
- Devices []string // device IDs for adb
- Procs int // number of parallel processes inside of every VM
+ Syzkaller string // path to syzkaller checkout (syz-manager will look for binaries in bin subdir)
+ Procs int // number of parallel processes inside of every VM
Sandbox string // type of sandbox to use during fuzzing:
// "none": don't do anything special (has false positives, e.g. due to killing init)
@@ -53,15 +41,6 @@ type Config struct {
// "namespace": create a new namespace for fuzzer using CLONE_NEWNS/CLONE_NEWNET/CLONE_NEWPID/etc,
// requires building kernel with CONFIG_NAMESPACES, CONFIG_UTS_NS, CONFIG_USER_NS, CONFIG_PID_NS and CONFIG_NET_NS.
- Machine_Type string // GCE machine type (e.g. "n1-highcpu-2")
-
- Odroid_Host_Addr string // ip address of the host machine
- Odroid_Slave_Addr string // ip address of the Odroid board
- Odroid_Console string // console device name (e.g. "/dev/ttyUSB0")
- Odroid_Hub_Bus int // host USB bus number for the USB hub
- Odroid_Hub_Device int // host USB device number for the USB hub
- Odroid_Hub_Port int // port on the USB hub to which Odroid is connected
-
Cover bool // use kcov coverage (default: true)
Leak bool // do memory leak checking
Reproduce bool // reproduce, localize and minimize crashers (on by default)
@@ -71,6 +50,9 @@ type Config struct {
Suppressions []string // don't save reports matching these regexps, but reboot VM after them
Ignores []string // completely ignore reports matching these regexps (don't save nor reboot)
+ Type string // VM type (qemu, kvm, local)
+ VM json.RawMessage // VM-type-specific config
+
// Implementation details beyond this point.
ParsedSuppressions []*regexp.Regexp `json:"-"`
ParsedIgnores []*regexp.Regexp `json:"-"`
@@ -81,8 +63,11 @@ func Parse(filename string) (*Config, map[int]bool, error) {
Cover: true,
Reproduce: true,
Sandbox: "setuid",
+ Rpc: "localhost:0",
+ Output: "stdout",
+ Procs: 1,
}
- if err := pkgconfig.Load(filename, cfg); err != nil {
+ if err := pkgconfig.LoadFile(filename, cfg); err != nil {
return nil, nil, err
}
if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-fuzzer")); err != nil {
@@ -91,6 +76,9 @@ func Parse(filename string) (*Config, map[int]bool, error) {
if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-executor")); err != nil {
return nil, nil, fmt.Errorf("bad config syzkaller param: can't find bin/syz-executor")
}
+ if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-execprog")); err != nil {
+ return nil, nil, fmt.Errorf("bad config syzkaller param: can't find bin/syz-execprog")
+ }
if cfg.Http == "" {
return nil, nil, fmt.Errorf("config param http is empty")
}
@@ -103,75 +91,8 @@ func Parse(filename string) (*Config, map[int]bool, error) {
if cfg.Type == "" {
return nil, nil, fmt.Errorf("config param type is empty")
}
- switch cfg.Type {
- case "none":
- if cfg.Count != 0 {
- return nil, nil, fmt.Errorf("invalid config param count: %v, type \"none\" does not support param count", cfg.Count)
- }
- if cfg.Rpc == "" {
- return nil, nil, fmt.Errorf("config param rpc is empty (required for type \"none\")")
- }
- if len(cfg.Devices) != 0 {
- return nil, nil, fmt.Errorf("type %v does not support devices param", cfg.Type)
- }
- case "adb":
- if cfg.Count != 0 {
- return nil, nil, fmt.Errorf("don't specify count for adb, instead specify devices")
- }
- if len(cfg.Devices) == 0 {
- return nil, nil, fmt.Errorf("specify at least 1 adb device")
- }
- cfg.Count = len(cfg.Devices)
- case "odroid":
- if cfg.Count != 1 {
- return nil, nil, fmt.Errorf("no support for multiple Odroid devices yet, count should be 1")
- }
- if cfg.Odroid_Host_Addr == "" {
- return nil, nil, fmt.Errorf("config param odroid_host_addr is empty")
- }
- if cfg.Odroid_Slave_Addr == "" {
- return nil, nil, fmt.Errorf("config param odroid_slave_addr is empty")
- }
- if cfg.Odroid_Console == "" {
- return nil, nil, fmt.Errorf("config param odroid_console is empty")
- }
- if cfg.Odroid_Hub_Bus == 0 {
- return nil, nil, fmt.Errorf("config param odroid_hub_bus is empty")
- }
- if cfg.Odroid_Hub_Device == 0 {
- return nil, nil, fmt.Errorf("config param odroid_hub_device is empty")
- }
- if cfg.Odroid_Hub_Port == 0 {
- return nil, nil, fmt.Errorf("config param odroid_hub_port is empty")
- }
- case "gce":
- if cfg.Machine_Type == "" {
- return nil, nil, fmt.Errorf("machine_type parameter is empty (required for gce)")
- }
- fallthrough
- default:
- if cfg.Count <= 0 || cfg.Count > 1000 {
- return nil, nil, fmt.Errorf("invalid config param count: %v, want (1, 1000]", cfg.Count)
- }
- if len(cfg.Devices) != 0 {
- return nil, nil, fmt.Errorf("type %v does not support devices param", cfg.Type)
- }
- }
- if cfg.Rpc == "" {
- cfg.Rpc = "localhost:0"
- }
- if cfg.Procs <= 0 {
- cfg.Procs = 1
- }
- if cfg.Procs > 32 {
- return nil, nil, fmt.Errorf("config param procs has higher value '%v' then the max supported 32", cfg.Procs)
- }
- if cfg.Output == "" {
- if cfg.Type == "local" {
- cfg.Output = "none"
- } else {
- cfg.Output = "stdout"
- }
+ if cfg.Procs < 1 || cfg.Procs > 32 {
+ return nil, nil, fmt.Errorf("bad config param procs: '%v', want [1, 32]", cfg.Procs)
}
switch cfg.Output {
case "none", "stdout", "dmesg", "file":
@@ -184,23 +105,9 @@ func Parse(filename string) (*Config, map[int]bool, error) {
return nil, nil, fmt.Errorf("config param sandbox must contain one of none/setuid/namespace")
}
- wd, err := os.Getwd()
- if err != nil {
- return nil, nil, fmt.Errorf("failed to get wd: %v", err)
- }
- abs := func(path string) string {
- if path != "" && !filepath.IsAbs(path) {
- path = filepath.Join(wd, path)
- }
- return path
- }
- cfg.Workdir = abs(cfg.Workdir)
- cfg.Kernel = abs(cfg.Kernel)
- cfg.Vmlinux = abs(cfg.Vmlinux)
- cfg.Syzkaller = abs(cfg.Syzkaller)
- cfg.Initrd = abs(cfg.Initrd)
- cfg.Sshkey = abs(cfg.Sshkey)
- cfg.Bin = abs(cfg.Bin)
+ cfg.Workdir = osutil.Abs(cfg.Workdir)
+ cfg.Vmlinux = osutil.Abs(cfg.Vmlinux)
+ cfg.Syzkaller = osutil.Abs(cfg.Syzkaller)
syscalls, err := parseSyscalls(cfg)
if err != nil {
@@ -211,21 +118,14 @@ func Parse(filename string) (*Config, map[int]bool, error) {
return nil, nil, err
}
- if cfg.Hub_Addr != "" {
- if cfg.Name == "" {
- return nil, nil, fmt.Errorf("hub_addr is set, but name is empty")
- }
- if cfg.Hub_Key == "" {
- return nil, nil, fmt.Errorf("hub_addr is set, but hub_key is empty")
- }
+ if (cfg.Hub_Addr != "" || cfg.Dashboard_Addr != "") && cfg.Name == "" {
+ return nil, nil, fmt.Errorf("hub_addr//dashboard_addr is set, but name is empty")
}
- if cfg.Dashboard_Addr != "" {
- if cfg.Name == "" {
- return nil, nil, fmt.Errorf("dashboard_addr is set, but name is empty")
- }
- if cfg.Dashboard_Key == "" {
- return nil, nil, fmt.Errorf("dashboard_addr is set, but dashboard_key is empty")
- }
+ if cfg.Hub_Addr != "" && cfg.Hub_Key == "" {
+ return nil, nil, fmt.Errorf("hub_addr is set, but hub_key is empty")
+ }
+ if cfg.Dashboard_Addr != "" && cfg.Dashboard_Key == "" {
+ return nil, nil, fmt.Errorf("dashboard_addr is set, but dashboard_key is empty")
}
return cfg, syscalls, nil
@@ -291,7 +191,6 @@ func parseSuppressions(cfg *Config) error {
"signal SIGBUS: bus error", // presubmably OOM turned into SIGBUS
"Out of memory: Kill process .* \\(syz-fuzzer\\)",
"lowmemorykiller: Killing 'syz-fuzzer'",
- //"INFO: lockdep is turned off", // printed by some sysrq that dumps scheduler state, but also on all lockdep reports
}...)
for _, s := range supp {
re, err := regexp.Compile(s)
@@ -309,40 +208,3 @@ func parseSuppressions(cfg *Config) error {
}
return nil
}
-
-func CreateVMConfig(cfg *Config, index int) (*vm.Config, error) {
- if index < 0 || index >= cfg.Count {
- return nil, fmt.Errorf("invalid VM index %v (count %v)", index, cfg.Count)
- }
- workdir, err := fileutil.ProcessTempDir(cfg.Workdir)
- if err != nil {
- return nil, fmt.Errorf("failed to create instance temp dir: %v", err)
- }
- vmCfg := &vm.Config{
- Name: fmt.Sprintf("%v-%v-%v", cfg.Type, cfg.Name, index),
- Index: index,
- Workdir: workdir,
- Bin: cfg.Bin,
- BinArgs: cfg.Bin_Args,
- Kernel: cfg.Kernel,
- Cmdline: cfg.Cmdline,
- Image: cfg.Image,
- Initrd: cfg.Initrd,
- Sshkey: cfg.Sshkey,
- Executor: filepath.Join(cfg.Syzkaller, "bin", "syz-executor"),
- Cpu: cfg.Cpu,
- Mem: cfg.Mem,
- Debug: cfg.Debug,
- MachineType: cfg.Machine_Type,
- OdroidHostAddr: cfg.Odroid_Host_Addr,
- OdroidSlaveAddr: cfg.Odroid_Slave_Addr,
- OdroidConsole: cfg.Odroid_Console,
- OdroidHubBus: cfg.Odroid_Hub_Bus,
- OdroidHubDevice: cfg.Odroid_Hub_Device,
- OdroidHubPort: cfg.Odroid_Hub_Port,
- }
- if len(cfg.Devices) != 0 {
- vmCfg.Device = cfg.Devices[index]
- }
- return vmCfg, nil
-}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 9d7027a2c..edd12c727 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -31,12 +31,6 @@ import (
. "github.com/google/syzkaller/rpctype"
"github.com/google/syzkaller/syz-manager/config"
"github.com/google/syzkaller/vm"
- _ "github.com/google/syzkaller/vm/adb"
- _ "github.com/google/syzkaller/vm/gce"
- _ "github.com/google/syzkaller/vm/kvm"
- _ "github.com/google/syzkaller/vm/local"
- _ "github.com/google/syzkaller/vm/odroid"
- _ "github.com/google/syzkaller/vm/qemu"
)
var (
@@ -47,6 +41,7 @@ var (
type Manager struct {
cfg *config.Config
+ vmPool *vm.Pool
crashdir string
port int
corpusDB *db.DB
@@ -87,10 +82,10 @@ type Fuzzer struct {
}
type Crash struct {
- vmName string
- desc string
- text []byte
- output []byte
+ vmIndex int
+ desc string
+ text []byte
+ output []byte
}
func main() {
@@ -100,15 +95,23 @@ func main() {
if err != nil {
Fatalf("%v", err)
}
- if *flagDebug {
- cfg.Debug = true
- cfg.Count = 1
- }
initAllCover(cfg.Vmlinux)
RunManager(cfg, syscalls)
}
func RunManager(cfg *config.Config, syscalls map[int]bool) {
+ env := &vm.Env{
+ Name: cfg.Name,
+ Workdir: cfg.Workdir,
+ Image: cfg.Image,
+ Debug: *flagDebug,
+ Config: cfg.VM,
+ }
+ vmPool, err := vm.Create(cfg.Type, env)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+
crashdir := filepath.Join(cfg.Workdir, "crashes")
os.MkdirAll(crashdir, 0700)
@@ -124,6 +127,7 @@ func RunManager(cfg *config.Config, syscalls map[int]bool) {
mgr := &Manager{
cfg: cfg,
+ vmPool: vmPool,
crashdir: crashdir,
startTime: time.Now(),
stats: make(map[string]uint64),
@@ -146,7 +150,6 @@ func RunManager(cfg *config.Config, syscalls map[int]bool) {
Fatalf("failed to convert old corpus: %v", err)
}
}
- var err error
mgr.corpusDB, err = db.Open(dbFilename)
if err != nil {
Fatalf("failed to open corpus database: %v", err)
@@ -313,12 +316,13 @@ func (mgr *Manager) vmLoop() {
Logf(0, "booting test machines...")
Logf(0, "wait for the connection from test machine...")
reproInstances := 4
- if reproInstances > mgr.cfg.Count {
- reproInstances = mgr.cfg.Count
+ vmCount := mgr.vmPool.Count()
+ if reproInstances > vmCount {
+ reproInstances = vmCount
}
- instances := make([]int, mgr.cfg.Count)
+ instances := make([]int, vmCount)
for i := range instances {
- instances[i] = mgr.cfg.Count - i - 1
+ instances[i] = vmCount - i - 1
}
runDone := make(chan *RunResult, 1)
pendingRepro := make(map[*Crash]bool)
@@ -342,10 +346,10 @@ func (mgr *Manager) vmLoop() {
}
Logf(1, "loop: shutdown=%v instances=%v/%v %+v repro: pending=%v reproducing=%v queued=%v",
- shutdown == nil, len(instances), mgr.cfg.Count, instances,
+ shutdown == nil, len(instances), vmCount, instances,
len(pendingRepro), len(reproducing), len(reproQueue))
if shutdown == nil {
- if len(instances) == mgr.cfg.Count {
+ if len(instances) == vmCount {
return
}
} else {
@@ -358,7 +362,7 @@ func (mgr *Manager) vmLoop() {
instances = instances[:len(instances)-reproInstances]
Logf(1, "loop: starting repro of '%v' on instances %+v", crash.desc, vmIndexes)
go func() {
- res, err := repro.Run(crash.output, mgr.cfg, vmIndexes)
+ res, err := repro.Run(crash.output, mgr.cfg, mgr.vmPool, vmIndexes)
reproDone <- &ReproResult{vmIndexes, crash, res, err}
}()
}
@@ -368,11 +372,7 @@ func (mgr *Manager) vmLoop() {
instances = instances[:last]
Logf(1, "loop: starting instance %v", idx)
go func() {
- vmCfg, err := config.CreateVMConfig(mgr.cfg, idx)
- if err != nil {
- Fatalf("failed to create VM config: %v", err)
- }
- crash, err := mgr.runInstance(vmCfg, idx == 0)
+ crash, err := mgr.runInstance(idx)
runDone <- &RunResult{idx, crash, err}
}()
}
@@ -423,8 +423,8 @@ func (mgr *Manager) vmLoop() {
}
}
-func (mgr *Manager) runInstance(vmCfg *vm.Config, first bool) (*Crash, error) {
- inst, err := vm.Create(mgr.cfg.Type, vmCfg)
+func (mgr *Manager) runInstance(index int) (*Crash, error) {
+ inst, err := mgr.vmPool.Create(index)
if err != nil {
return nil, fmt.Errorf("failed to create instance: %v", err)
}
@@ -444,7 +444,7 @@ func (mgr *Manager) runInstance(vmCfg *vm.Config, first bool) (*Crash, error) {
}
// Leak detection significantly slows down fuzzing, so detect leaks only on the first instance.
- leak := first && mgr.cfg.Leak
+ leak := mgr.cfg.Leak && index == 0
fuzzerV := 0
procs := mgr.cfg.Procs
if *flagDebug {
@@ -456,24 +456,24 @@ func (mgr *Manager) runInstance(vmCfg *vm.Config, first bool) (*Crash, error) {
start := time.Now()
atomic.AddUint32(&mgr.numFuzzing, 1)
defer atomic.AddUint32(&mgr.numFuzzing, ^uint32(0))
- cmd := fmt.Sprintf("%v -executor=%v -name=%v -manager=%v -output=%v -procs=%v -leak=%v -cover=%v -sandbox=%v -debug=%v -v=%d",
- fuzzerBin, executorBin, vmCfg.Name, fwdAddr, mgr.cfg.Output, procs, leak, mgr.cfg.Cover, mgr.cfg.Sandbox, *flagDebug, fuzzerV)
+ cmd := fmt.Sprintf("%v -executor=%v -name=vm-%v -manager=%v -output=%v -procs=%v -leak=%v -cover=%v -sandbox=%v -debug=%v -v=%d",
+ fuzzerBin, executorBin, index, fwdAddr, mgr.cfg.Output, procs, leak, mgr.cfg.Cover, mgr.cfg.Sandbox, *flagDebug, fuzzerV)
outc, errc, err := inst.Run(time.Hour, mgr.vmStop, cmd)
if err != nil {
return nil, fmt.Errorf("failed to run fuzzer: %v", err)
}
- desc, text, output, crashed, timedout := vm.MonitorExecution(outc, errc, mgr.cfg.Type == "local", true, mgr.cfg.ParsedIgnores)
+ desc, text, output, crashed, timedout := vm.MonitorExecution(outc, errc, true, mgr.cfg.ParsedIgnores)
if timedout {
// This is the only "OK" outcome.
- Logf(0, "%v: running for %v, restarting (%v)", vmCfg.Name, time.Since(start), desc)
+ Logf(0, "vm-%v: running for %v, restarting (%v)", index, time.Since(start), desc)
return nil, nil
}
if !crashed {
// syz-fuzzer exited, but it should not.
desc = "lost connection to test machine"
}
- return &Crash{vmCfg.Name, desc, text, output}, nil
+ return &Crash{index, desc, text, output}, nil
}
func (mgr *Manager) isSuppressed(crash *Crash) bool {
@@ -481,7 +481,7 @@ func (mgr *Manager) isSuppressed(crash *Crash) bool {
if !re.Match(crash.output) {
continue
}
- Logf(1, "%v: suppressing '%v' with '%v'", crash.vmName, crash.desc, re.String())
+ Logf(1, "vm-%v: suppressing '%v' with '%v'", crash.vmIndex, crash.desc, re.String())
mgr.mu.Lock()
mgr.stats["suppressed"]++
mgr.mu.Unlock()
@@ -491,7 +491,7 @@ func (mgr *Manager) isSuppressed(crash *Crash) bool {
}
func (mgr *Manager) saveCrash(crash *Crash) {
- Logf(0, "%v: crash: %v", crash.vmName, crash.desc)
+ Logf(0, "vm-%v: crash: %v", crash.vmIndex, crash.desc)
mgr.mu.Lock()
mgr.stats["crashes"]++
if !mgr.crashTypes[crash.desc] {
diff --git a/tools/syz-crush/crush.go b/tools/syz-crush/crush.go
index 5a785a264..38e7eb8d4 100644
--- a/tools/syz-crush/crush.go
+++ b/tools/syz-crush/crush.go
@@ -21,10 +21,6 @@ import (
. "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/syz-manager/config"
"github.com/google/syzkaller/vm"
- _ "github.com/google/syzkaller/vm/adb"
- _ "github.com/google/syzkaller/vm/gce"
- _ "github.com/google/syzkaller/vm/kvm"
- _ "github.com/google/syzkaller/vm/qemu"
)
var (
@@ -40,24 +36,28 @@ func main() {
if len(flag.Args()) != 1 {
Fatalf("usage: syz-crush -config=config.file execution.log")
}
+ env := &vm.Env{
+ Name: cfg.Name,
+ Workdir: cfg.Workdir,
+ Image: cfg.Image,
+ Debug: false,
+ Config: cfg.VM,
+ }
+ vmPool, err := vm.Create(cfg.Type, env)
+ if err != nil {
+ Fatalf("%v", err)
+ }
Logf(0, "booting test machines...")
var shutdown uint32
var wg sync.WaitGroup
- wg.Add(cfg.Count + 1)
- for i := 0; i < cfg.Count; i++ {
+ wg.Add(vmPool.Count() + 1)
+ for i := 0; i < vmPool.Count(); i++ {
i := i
go func() {
defer wg.Done()
for {
- vmCfg, err := config.CreateVMConfig(cfg, i)
- if atomic.LoadUint32(&shutdown) != 0 {
- break
- }
- if err != nil {
- Fatalf("failed to create VM config: %v", err)
- }
- runInstance(cfg, vmCfg)
+ runInstance(cfg, vmPool, i)
if atomic.LoadUint32(&shutdown) != 0 {
break
}
@@ -80,8 +80,8 @@ func main() {
wg.Wait()
}
-func runInstance(cfg *config.Config, vmCfg *vm.Config) {
- inst, err := vm.Create(cfg.Type, vmCfg)
+func runInstance(cfg *config.Config, vmPool *vm.Pool, index int) {
+ inst, err := vmPool.Create(index)
if err != nil {
Logf(0, "failed to create instance: %v", err)
return
@@ -112,11 +112,11 @@ func runInstance(cfg *config.Config, vmCfg *vm.Config) {
return
}
- Logf(0, "%v: crushing...", vmCfg.Name)
- desc, _, output, crashed, timedout := vm.MonitorExecution(outc, errc, cfg.Type == "local", true, cfg.ParsedIgnores)
+ Logf(0, "vm-%v: crushing...", index)
+ desc, _, output, crashed, timedout := vm.MonitorExecution(outc, errc, true, cfg.ParsedIgnores)
if timedout {
// This is the only "OK" outcome.
- Logf(0, "%v: running long enough, restarting", vmCfg.Name)
+ Logf(0, "vm-%v: running long enough, restarting", index)
} else {
if !crashed {
// syz-execprog exited, but it should not.
@@ -128,7 +128,7 @@ func runInstance(cfg *config.Config, vmCfg *vm.Config) {
return
}
defer f.Close()
- Logf(0, "%v: crashed: %v, saving to %v", vmCfg.Name, desc, f.Name())
+ Logf(0, "vm-%v: crashed: %v, saving to %v", index, desc, f.Name())
f.Write(output)
}
return
diff --git a/tools/syz-repro/repro.go b/tools/syz-repro/repro.go
index f984714a8..cf39d59e7 100644
--- a/tools/syz-repro/repro.go
+++ b/tools/syz-repro/repro.go
@@ -16,11 +16,6 @@ import (
"github.com/google/syzkaller/repro"
"github.com/google/syzkaller/syz-manager/config"
"github.com/google/syzkaller/vm"
- _ "github.com/google/syzkaller/vm/adb"
- _ "github.com/google/syzkaller/vm/gce"
- _ "github.com/google/syzkaller/vm/kvm"
- _ "github.com/google/syzkaller/vm/odroid"
- _ "github.com/google/syzkaller/vm/qemu"
)
var (
@@ -35,12 +30,6 @@ func main() {
if err != nil {
Fatalf("%v", err)
}
- if *flagCount > 0 {
- cfg.Count = *flagCount
- }
- if cfg.Count > 4 {
- cfg.Count = 4
- }
if len(flag.Args()) != 1 {
Fatalf("usage: syz-repro -config=config.file execution.log")
}
@@ -48,7 +37,25 @@ func main() {
if err != nil {
Fatalf("failed to open log file: %v", err)
}
- vmIndexes := make([]int, cfg.Count)
+ env := &vm.Env{
+ Name: cfg.Name,
+ Workdir: cfg.Workdir,
+ Image: cfg.Image,
+ Debug: false,
+ Config: cfg.VM,
+ }
+ vmPool, err := vm.Create(cfg.Type, env)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ vmCount := vmPool.Count()
+ if *flagCount > 0 || *flagCount < vmCount {
+ vmCount = *flagCount
+ }
+ if vmCount > 4 {
+ vmCount = 4
+ }
+ vmIndexes := make([]int, vmCount)
for i := range vmIndexes {
vmIndexes[i] = i
}
@@ -63,7 +70,7 @@ func main() {
Fatalf("terminating")
}()
- res, err := repro.Run(data, cfg, vmIndexes)
+ res, err := repro.Run(data, cfg, vmPool, vmIndexes)
if err != nil {
Logf(0, "reproduction failed: %v", err)
}
diff --git a/vm/adb/adb.go b/vm/adb/adb.go
index 59313c3cb..f6bc12328 100644
--- a/vm/adb/adb.go
+++ b/vm/adb/adb.go
@@ -17,24 +17,73 @@ import (
"sync"
"time"
+ "github.com/google/syzkaller/pkg/config"
. "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/vm"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/vm/vmimpl"
)
func init() {
- vm.Register("adb", ctor)
+ vmimpl.Register("adb", ctor)
+}
+
+type Config struct {
+ Adb string // adb binary name ("adb" by default)
+ Devices []string // list of adb device IDs to use
+}
+
+type Pool struct {
+ env *vmimpl.Env
+ cfg *Config
}
type instance struct {
- cfg *vm.Config
+ adbBin string
+ device string
console string
closed chan bool
+ debug bool
}
-func ctor(cfg *vm.Config) (vm.Instance, error) {
+func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
+ cfg := &Config{
+ Adb: "adb",
+ }
+ if err := config.LoadData(env.Config, cfg); err != nil {
+ return nil, err
+ }
+ if _, err := exec.LookPath(cfg.Adb); err != nil {
+ return nil, err
+ }
+ if len(cfg.Devices) == 0 {
+ return nil, fmt.Errorf("no adb devices specified")
+ }
+ devRe := regexp.MustCompile("[0-9A-F]+")
+ for _, dev := range cfg.Devices {
+ if !devRe.MatchString(dev) {
+ return nil, fmt.Errorf("invalid adb device id '%v'", dev)
+ }
+ }
+ if env.Debug {
+ cfg.Devices = cfg.Devices[:1]
+ }
+ pool := &Pool{
+ cfg: cfg,
+ env: env,
+ }
+ return pool, nil
+}
+
+func (pool *Pool) Count() int {
+ return len(pool.cfg.Devices)
+}
+
+func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
inst := &instance{
- cfg: cfg,
+ adbBin: pool.cfg.Adb,
+ device: pool.cfg.Devices[index],
closed: make(chan bool),
+ debug: pool.env.Debug,
}
closeInst := inst
defer func() {
@@ -42,13 +91,10 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
closeInst.Close()
}
}()
- if err := validateConfig(cfg); err != nil {
- return nil, err
- }
if err := inst.repair(); err != nil {
return nil, err
}
- inst.console = findConsole(inst.cfg.Bin, inst.cfg.Device)
+ inst.console = findConsole(inst.adbBin, inst.device)
if err := inst.checkBatteryLevel(); err != nil {
return nil, err
}
@@ -60,19 +106,6 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
return inst, nil
}
-func validateConfig(cfg *vm.Config) error {
- if cfg.Bin == "" {
- cfg.Bin = "adb"
- }
- if _, err := exec.LookPath(cfg.Bin); err != nil {
- return err
- }
- if !regexp.MustCompile("[0-9A-F]+").MatchString(cfg.Device) {
- return fmt.Errorf("invalid adb device id '%v'", cfg.Device)
- }
- return nil
-}
-
var (
consoleCacheMu sync.Mutex
consoleToDev = make(map[string]string)
@@ -123,7 +156,7 @@ func findConsoleImpl(adb, dev string) (string, error) {
out := new([]byte)
output[con] = out
go func(con string) {
- tty, err := vm.OpenConsole(con)
+ tty, err := vmimpl.OpenConsole(con)
if err != nil {
errors <- err
return
@@ -187,7 +220,7 @@ func (inst *instance) Forward(port int) (string, error) {
}
func (inst *instance) adb(args ...string) ([]byte, error) {
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "executing adb %+v", args)
}
rpipe, wpipe, err := os.Pipe()
@@ -196,7 +229,7 @@ func (inst *instance) adb(args ...string) ([]byte, error) {
}
defer wpipe.Close()
defer rpipe.Close()
- cmd := exec.Command(inst.cfg.Bin, append([]string{"-s", inst.cfg.Device}, args...)...)
+ cmd := exec.Command(inst.adbBin, append([]string{"-s", inst.device}, args...)...)
cmd.Stdout = wpipe
cmd.Stderr = wpipe
if err := cmd.Start(); err != nil {
@@ -207,7 +240,7 @@ func (inst *instance) adb(args ...string) ([]byte, error) {
go func() {
select {
case <-time.After(time.Minute):
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "adb hanged")
}
cmd.Process.Kill()
@@ -217,13 +250,13 @@ func (inst *instance) adb(args ...string) ([]byte, error) {
if err := cmd.Wait(); err != nil {
close(done)
out, _ := ioutil.ReadAll(rpipe)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "adb failed: %v\n%s", err, out)
}
return nil, fmt.Errorf("adb %+v failed: %v\n%s", args, err, out)
}
close(done)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "adb returned")
}
out, _ := ioutil.ReadAll(rpipe)
@@ -243,7 +276,7 @@ func (inst *instance) repair() error {
return err
}
// Now give it another 5 minutes to boot.
- if !vm.SleepInterruptible(10 * time.Second) {
+ if !vmimpl.SleepInterruptible(10 * time.Second) {
return fmt.Errorf("shutdown in progress")
}
if err := inst.waitForSsh(); err != nil {
@@ -260,7 +293,7 @@ func (inst *instance) repair() error {
func (inst *instance) waitForSsh() error {
var err error
for i := 0; i < 300; i++ {
- if !vm.SleepInterruptible(time.Second) {
+ if !vmimpl.SleepInterruptible(time.Second) {
return fmt.Errorf("shutdown in progress")
}
if _, err = inst.adb("shell", "pwd"); err == nil {
@@ -280,12 +313,12 @@ func (inst *instance) checkBatteryLevel() error {
return err
}
if val >= minLevel {
- Logf(0, "device %v: battery level %v%%, OK", inst.cfg.Device, val)
+ Logf(0, "device %v: battery level %v%%, OK", inst.device, val)
return nil
}
for {
- Logf(0, "device %v: battery level %v%%, waiting for %v%%", inst.cfg.Device, val, requiredLevel)
- if !vm.SleepInterruptible(time.Minute) {
+ Logf(0, "device %v: battery level %v%%, waiting for %v%%", inst.device, val, requiredLevel)
+ if !vmimpl.SleepInterruptible(time.Minute) {
return nil
}
val, err = inst.getBatteryLevel(0)
@@ -332,7 +365,6 @@ func (inst *instance) getBatteryLevel(numRetry int) (int, error) {
func (inst *instance) Close() {
close(inst.closed)
- os.RemoveAll(inst.cfg.Workdir)
}
func (inst *instance) Copy(hostSrc string) (string, error) {
@@ -347,23 +379,23 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
var tty io.ReadCloser
var err error
if inst.console == "adb" {
- tty, err = vm.OpenAdbConsole(inst.cfg.Bin, inst.cfg.Device)
+ tty, err = vmimpl.OpenAdbConsole(inst.adbBin, inst.device)
} else {
- tty, err = vm.OpenConsole(inst.console)
+ tty, err = vmimpl.OpenConsole(inst.console)
}
if err != nil {
return nil, nil, err
}
- adbRpipe, adbWpipe, err := vm.LongPipe()
+ adbRpipe, adbWpipe, err := osutil.LongPipe()
if err != nil {
tty.Close()
return nil, nil, err
}
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "starting: adb shell %v", command)
}
- adb := exec.Command(inst.cfg.Bin, "-s", inst.cfg.Device, "shell", "cd /data; "+command)
+ adb := exec.Command(inst.adbBin, "-s", inst.device, "shell", "cd /data; "+command)
adb.Stdout = adbWpipe
adb.Stderr = adbWpipe
if err := adb.Start(); err != nil {
@@ -375,10 +407,10 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
adbWpipe.Close()
var tee io.Writer
- if inst.cfg.Debug {
+ if inst.debug {
tee = os.Stdout
}
- merger := vm.NewOutputMerger(tee)
+ merger := vmimpl.NewOutputMerger(tee)
merger.Add("console", tty)
merger.Add("adb", adbRpipe)
@@ -393,11 +425,11 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
go func() {
select {
case <-time.After(timeout):
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-stop:
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-inst.closed:
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "instance closed")
}
signal(fmt.Errorf("instance closed"))
diff --git a/vm/gce/gce.go b/vm/gce/gce.go
index f95967c29..afb393fd4 100644
--- a/vm/gce/gce.go
+++ b/vm/gce/gce.go
@@ -14,23 +14,36 @@ package gce
import (
"fmt"
"io/ioutil"
- "os"
"os/exec"
"path/filepath"
- "sync"
"time"
+ "github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/gce"
. "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/vm"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/vm/vmimpl"
)
func init() {
- vm.Register("gce", ctor)
+ vmimpl.Register("gce", ctor)
+}
+
+type Config struct {
+ Count int // number of VMs to use
+ Machine_Type string // GCE machine type (e.g. "n1-highcpu-2")
+ Sshkey string // root ssh key for the image
+}
+
+type Pool struct {
+ env *vmimpl.Env
+ cfg *Config
+ GCE *gce.Context
}
type instance struct {
- cfg *vm.Config
+ cfg *Config
+ GCE *gce.Context
name string
ip string
offset int64
@@ -41,31 +54,49 @@ type instance struct {
closed chan bool
}
-var (
- initOnce sync.Once
- GCE *gce.Context
-)
+func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
+ if env.Name == "" {
+ return nil, fmt.Errorf("config param name is empty (required for GCE)")
+ }
+ cfg := &Config{
+ Count: 1,
+ }
+ if err := config.LoadData(env.Config, cfg); err != nil {
+ return nil, err
+ }
+ if cfg.Count < 1 || cfg.Count > 1000 {
+ return nil, fmt.Errorf("invalid config param count: %v, want [1, 1000]", cfg.Count)
+ }
+ if env.Debug {
+ cfg.Count = 1
+ }
+ if cfg.Machine_Type == "" {
+ return nil, fmt.Errorf("machine_type parameter is empty")
+ }
+ cfg.Sshkey = osutil.Abs(cfg.Sshkey)
-func initGCE() {
- var err error
- GCE, err = gce.NewContext()
+ GCE, err := gce.NewContext()
if err != nil {
- Fatalf("failed to init gce: %v", err)
+ return nil, fmt.Errorf("failed to init gce: %v", err)
+ }
+ Logf(0, "gce initialized: running on %v, internal IP %v, project %v, zone %v",
+ GCE.Instance, GCE.InternalIP, GCE.ProjectID, GCE.ZoneID)
+ pool := &Pool{
+ cfg: cfg,
+ env: env,
+ GCE: GCE,
}
- Logf(0, "gce initialized: running on %v, internal IP %v, project %v, zone %v", GCE.Instance, GCE.InternalIP, GCE.ProjectID, GCE.ZoneID)
+ return pool, nil
}
-func ctor(cfg *vm.Config) (vm.Instance, error) {
- initOnce.Do(initGCE)
- ok := false
- defer func() {
- if !ok {
- os.RemoveAll(cfg.Workdir)
- }
- }()
+func (pool *Pool) Count() int {
+ return pool.cfg.Count
+}
+func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
+ name := fmt.Sprintf("%v-%v", pool.env.Name, index)
// Create SSH key for the instance.
- gceKey := filepath.Join(cfg.Workdir, "key")
+ gceKey := filepath.Join(workdir, "key")
keygen := exec.Command("ssh-keygen", "-t", "rsa", "-b", "2048", "-N", "", "-C", "syzkaller", "-f", gceKey)
if out, err := keygen.CombinedOutput(); err != nil {
return nil, fmt.Errorf("failed to execute ssh-keygen: %v\n%s", err, out)
@@ -75,35 +106,38 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
return nil, fmt.Errorf("failed to read file: %v", err)
}
- Logf(0, "deleting instance: %v", cfg.Name)
- if err := GCE.DeleteInstance(cfg.Name, true); err != nil {
+ Logf(0, "deleting instance: %v", name)
+ if err := pool.GCE.DeleteInstance(name, true); err != nil {
return nil, err
}
- Logf(0, "creating instance: %v", cfg.Name)
- ip, err := GCE.CreateInstance(cfg.Name, cfg.MachineType, cfg.Image, string(gceKeyPub))
+ Logf(0, "creating instance: %v", name)
+ ip, err := pool.GCE.CreateInstance(name, pool.cfg.Machine_Type, pool.env.Image, string(gceKeyPub))
if err != nil {
return nil, err
}
+
+ ok := false
defer func() {
if !ok {
- GCE.DeleteInstance(cfg.Name, true)
+ pool.GCE.DeleteInstance(name, true)
}
}()
- sshKey := cfg.Sshkey
+ sshKey := pool.cfg.Sshkey
sshUser := "root"
if sshKey == "" {
// Assuming image supports GCE ssh fanciness.
sshKey = gceKey
sshUser = "syzkaller"
}
- Logf(0, "wait instance to boot: %v (%v)", cfg.Name, ip)
+ Logf(0, "wait instance to boot: %v (%v)", name, ip)
if err := waitInstanceBoot(ip, sshKey, sshUser); err != nil {
return nil, err
}
ok = true
inst := &instance{
- cfg: cfg,
- name: cfg.Name,
+ cfg: pool.cfg,
+ GCE: pool.GCE,
+ name: name,
ip: ip,
gceKey: gceKey,
sshKey: sshKey,
@@ -115,12 +149,11 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
func (inst *instance) Close() {
close(inst.closed)
- GCE.DeleteInstance(inst.name, false)
- os.RemoveAll(inst.cfg.Workdir)
+ inst.GCE.DeleteInstance(inst.name, false)
}
func (inst *instance) Forward(port int) (string, error) {
- return fmt.Sprintf("%v:%v", GCE.InternalIP, port), nil
+ return fmt.Sprintf("%v:%v", inst.GCE.InternalIP, port), nil
}
func (inst *instance) Copy(hostSrc string) (string, error) {
@@ -147,12 +180,13 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
}
func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command string) (<-chan []byte, <-chan error, error) {
- conRpipe, conWpipe, err := vm.LongPipe()
+ conRpipe, conWpipe, err := osutil.LongPipe()
if err != nil {
return nil, nil, err
}
- conAddr := fmt.Sprintf("%v.%v.%v.syzkaller.port=1@ssh-serialport.googleapis.com", GCE.ProjectID, GCE.ZoneID, inst.name)
+ conAddr := fmt.Sprintf("%v.%v.%v.syzkaller.port=1@ssh-serialport.googleapis.com",
+ inst.GCE.ProjectID, inst.GCE.ZoneID, inst.name)
conArgs := append(sshArgs(inst.gceKey, "-p", 9600), conAddr)
con := exec.Command("ssh", conArgs...)
con.Env = []string{}
@@ -171,7 +205,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
}
conWpipe.Close()
- sshRpipe, sshWpipe, err := vm.LongPipe()
+ sshRpipe, sshWpipe, err := osutil.LongPipe()
if err != nil {
con.Process.Kill()
sshRpipe.Close()
@@ -193,7 +227,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
}
sshWpipe.Close()
- merger := vm.NewOutputMerger(nil)
+ merger := vmimpl.NewOutputMerger(nil)
merger.Add("console", conRpipe)
merger.Add("ssh", sshRpipe)
@@ -208,9 +242,9 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
go func() {
select {
case <-time.After(timeout):
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-stop:
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-inst.closed:
signal(fmt.Errorf("instance closed"))
case err := <-merger.Err:
@@ -225,9 +259,9 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
} else {
// Check if the instance was terminated due to preemption or host maintenance.
time.Sleep(5 * time.Second) // just to avoid any GCE races
- if !GCE.IsInstanceRunning(inst.name) {
+ if !inst.GCE.IsInstanceRunning(inst.name) {
Logf(1, "%v: ssh exited but instance is not running", inst.name)
- err = vm.TimeoutErr
+ err = vmimpl.TimeoutErr
}
}
signal(err)
@@ -244,7 +278,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
func waitInstanceBoot(ip, sshKey, sshUser string) error {
for i := 0; i < 100; i++ {
- if !vm.SleepInterruptible(5 * time.Second) {
+ if !vmimpl.SleepInterruptible(5 * time.Second) {
return fmt.Errorf("shutdown in progress")
}
cmd := exec.Command("ssh", append(sshArgs(sshKey, "-p", 22), sshUser+"@"+ip, "pwd")...)
diff --git a/vm/kvm/kvm.go b/vm/kvm/kvm.go
index b0361e8de..c18ed27c7 100644
--- a/vm/kvm/kvm.go
+++ b/vm/kvm/kvm.go
@@ -1,6 +1,8 @@
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+// Package kvm provides VMs based on lkvm (kvmtool) virtualization.
+// It is not well tested.
package kvm
import (
@@ -12,11 +14,12 @@ import (
"runtime"
"strconv"
"sync"
- "syscall"
"time"
+ "github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/fileutil"
- "github.com/google/syzkaller/vm"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/vm/vmimpl"
)
const (
@@ -24,28 +27,85 @@ const (
)
func init() {
- vm.Register("kvm", ctor)
+ vmimpl.Register("kvm", ctor)
+}
+
+type Config struct {
+ Count int // number of VMs to use
+ Lkvm string // lkvm binary name
+ Kernel string // e.g. arch/x86/boot/bzImage
+ Cmdline string // kernel command line
+ Cpu int // number of VM CPUs
+ Mem int // amount of VM memory in MBs
+}
+
+type Pool struct {
+ env *vmimpl.Env
+ cfg *Config
}
type instance struct {
- cfg *vm.Config
+ cfg *Config
sandbox string
sandboxPath string
lkvm *exec.Cmd
readerC chan error
waiterC chan error
+ debug bool
mu sync.Mutex
outputB []byte
outputC chan []byte
}
-func ctor(cfg *vm.Config) (vm.Instance, error) {
- sandbox := fmt.Sprintf("syz-%v", cfg.Index)
+func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
+ cfg := &Config{
+ Count: 1,
+ Lkvm: "lkvm",
+ }
+ if err := config.LoadData(env.Config, cfg); err != nil {
+ return nil, err
+ }
+ if cfg.Count < 1 || cfg.Count > 1000 {
+ return nil, fmt.Errorf("invalid config param count: %v, want [1, 1000]", cfg.Count)
+ }
+ if env.Debug {
+ cfg.Count = 1
+ }
+ if env.Image != "" {
+ return nil, fmt.Errorf("lkvm does not support custom images")
+ }
+ if _, err := exec.LookPath(cfg.Lkvm); err != nil {
+ return nil, err
+ }
+ if _, err := os.Stat(cfg.Kernel); err != nil {
+ return nil, fmt.Errorf("kernel file '%v' does not exist: %v", cfg.Kernel, err)
+ }
+ if cfg.Cpu < 1 || cfg.Cpu > 1024 {
+ return nil, fmt.Errorf("invalid config param cpu: %v, want [1-1024]", cfg.Cpu)
+ }
+ if cfg.Mem < 128 || cfg.Mem > 1048576 {
+ return nil, fmt.Errorf("invalid config param mem: %v, want [128-1048576]", cfg.Mem)
+ }
+ cfg.Kernel = osutil.Abs(cfg.Kernel)
+ pool := &Pool{
+ cfg: cfg,
+ env: env,
+ }
+ return pool, nil
+}
+
+func (pool *Pool) Count() int {
+ return pool.cfg.Count
+}
+
+func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
+ sandbox := fmt.Sprintf("syz-%v", index)
inst := &instance{
- cfg: cfg,
+ cfg: pool.cfg,
sandbox: sandbox,
sandboxPath: filepath.Join(os.Getenv("HOME"), ".lkvm", sandbox),
+ debug: pool.env.Debug,
}
closeInst := inst
defer func() {
@@ -54,31 +114,24 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
}
}()
- if err := validateConfig(cfg); err != nil {
- return nil, err
- }
-
os.RemoveAll(inst.sandboxPath)
os.Remove(inst.sandboxPath + ".sock")
- out, err := exec.Command(inst.cfg.Bin, "setup", sandbox).CombinedOutput()
+ out, err := exec.Command(inst.cfg.Lkvm, "setup", sandbox).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed to lkvm setup: %v\n%s", err, out)
}
- scriptPath := filepath.Join(cfg.Workdir, "script.sh")
+ scriptPath := filepath.Join(workdir, "script.sh")
if err := ioutil.WriteFile(scriptPath, []byte(script), 0700); err != nil {
return nil, fmt.Errorf("failed to create temp file: %v", err)
}
- rpipe, wpipe, err := os.Pipe()
+ rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
return nil, fmt.Errorf("failed to create pipe: %v", err)
}
- for sz := 128 << 10; sz <= 2<<20; sz *= 2 {
- syscall.Syscall(syscall.SYS_FCNTL, wpipe.Fd(), syscall.F_SETPIPE_SZ, uintptr(sz))
- }
- inst.lkvm = exec.Command("taskset", "-c", strconv.Itoa(inst.cfg.Index%runtime.NumCPU()),
- inst.cfg.Bin, "sandbox",
+ inst.lkvm = exec.Command("taskset", "-c", strconv.Itoa(index%runtime.NumCPU()),
+ inst.cfg.Lkvm, "sandbox",
"--disk", inst.sandbox,
"--kernel", inst.cfg.Kernel,
"--params", "slub_debug=UZ "+inst.cfg.Cmdline,
@@ -102,7 +155,7 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
for {
n, err := rpipe.Read(buf[:])
if n != 0 {
- if inst.cfg.Debug {
+ if inst.debug {
os.Stdout.Write(buf[:n])
os.Stdout.Write([]byte{'\n'})
}
@@ -147,31 +200,6 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
return inst, nil
}
-func validateConfig(cfg *vm.Config) error {
- if cfg.Bin == "" {
- cfg.Bin = "lkvm"
- }
- if _, err := exec.LookPath(cfg.Bin); err != nil {
- return err
- }
- if cfg.Image != "" {
- return fmt.Errorf("lkvm does not support custom images")
- }
- if cfg.Sshkey != "" {
- return fmt.Errorf("lkvm does not need ssh key")
- }
- if _, err := os.Stat(cfg.Kernel); err != nil {
- return fmt.Errorf("kernel file '%v' does not exist: %v", cfg.Kernel, err)
- }
- if cfg.Cpu <= 0 || cfg.Cpu > 1024 {
- return fmt.Errorf("bad qemu cpu: %v, want [1-1024]", cfg.Cpu)
- }
- if cfg.Mem < 128 || cfg.Mem > 1048576 {
- return fmt.Errorf("bad qemu mem: %v, want [128-1048576]", cfg.Mem)
- }
- return nil
-}
-
func (inst *instance) Close() {
if inst.lkvm != nil {
inst.lkvm.Process.Kill()
@@ -179,7 +207,6 @@ func (inst *instance) Close() {
inst.waiterC <- err // repost it for waiting goroutines
<-inst.readerC
}
- os.RemoveAll(inst.cfg.Workdir)
os.RemoveAll(inst.sandboxPath)
os.Remove(inst.sandboxPath + ".sock")
}
@@ -235,10 +262,10 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
for {
select {
case <-timeoutTicker.C:
- resultErr = vm.TimeoutErr
+ resultErr = vmimpl.TimeoutErr
break loop
case <-stop:
- resultErr = vm.TimeoutErr
+ resultErr = vmimpl.TimeoutErr
break loop
case <-secondTicker.C:
if _, err := os.Stat(cmdFile); err != nil {
diff --git a/vm/local/local.go b/vm/local/local.go
deleted file mode 100644
index 09226c251..000000000
--- a/vm/local/local.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package local
-
-import (
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
- "syscall"
- "time"
-
- "github.com/google/syzkaller/pkg/fileutil"
- "github.com/google/syzkaller/vm"
-)
-
-func init() {
- vm.Register("local", ctor)
-}
-
-type instance struct {
- cfg *vm.Config
- closed chan bool
-}
-
-func ctor(cfg *vm.Config) (vm.Instance, error) {
- // Disable annoying segfault dmesg messages, fuzzer is going to crash a lot.
- etrace, err := os.Open("/proc/sys/debug/exception-trace")
- if err == nil {
- etrace.Write([]byte{'0'})
- etrace.Close()
- }
-
- // Don't write executor core files.
- syscall.Setrlimit(syscall.RLIMIT_CORE, &syscall.Rlimit{0, 0})
-
- inst := &instance{
- cfg: cfg,
- closed: make(chan bool),
- }
- return inst, nil
-}
-
-func (inst *instance) Close() {
- close(inst.closed)
- os.RemoveAll(inst.cfg.Workdir)
-}
-
-func (inst *instance) Forward(port int) (string, error) {
- return fmt.Sprintf("127.0.0.1:%v", port), nil
-}
-
-func (inst *instance) Copy(hostSrc string) (string, error) {
- vmDst := filepath.Join(inst.cfg.Workdir, filepath.Base(hostSrc))
- if err := fileutil.CopyFile(hostSrc, vmDst); err != nil {
- return "", err
- }
- if err := os.Chmod(vmDst, 0777); err != nil {
- return "", err
- }
- return vmDst, nil
-}
-
-func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command string) (<-chan []byte, <-chan error, error) {
- rpipe, wpipe, err := os.Pipe()
- if err != nil {
- return nil, nil, fmt.Errorf("failed to create pipe: %v", err)
- }
- for sz := 128 << 10; sz <= 2<<20; sz *= 2 {
- syscall.Syscall(syscall.SYS_FCNTL, wpipe.Fd(), syscall.F_SETPIPE_SZ, uintptr(sz))
- }
- for strings.Index(command, " ") != -1 {
- command = strings.Replace(command, " ", " ", -1)
- }
- args := strings.Split(command, " ")
- cmd := exec.Command(args[0], args[1:]...)
- cmd.Stdout = wpipe
- cmd.Stderr = wpipe
- if err := cmd.Start(); err != nil {
- rpipe.Close()
- wpipe.Close()
- return nil, nil, err
- }
- wpipe.Close()
- outputC := make(chan []byte, 10)
- errorC := make(chan error, 1)
- done := make(chan bool)
- signal := func(err error) {
- time.Sleep(3 * time.Second) // wait for any pending output
- select {
- case errorC <- err:
- default:
- }
- }
- go func() {
- var buf [64 << 10]byte
- var output []byte
- for {
- n, err := rpipe.Read(buf[:])
- if n != 0 {
- if inst.cfg.Debug {
- os.Stdout.Write(buf[:n])
- os.Stdout.Write([]byte{'\n'})
- }
- output = append(output, buf[:n]...)
- select {
- case outputC <- output:
- output = nil
- default:
- }
- time.Sleep(time.Millisecond)
- }
- if err != nil {
- rpipe.Close()
- return
- }
- }
- }()
- go func() {
- err := cmd.Wait()
- signal(err)
- close(done)
- }()
- go func() {
- timeout := time.NewTicker(timeout)
- for {
- select {
- case <-timeout.C:
- signal(vm.TimeoutErr)
- cmd.Process.Kill()
- return
- case <-stop:
- signal(vm.TimeoutErr)
- cmd.Process.Kill()
- timeout.Stop()
- return
- case <-done:
- timeout.Stop()
- return
- case <-inst.closed:
- signal(fmt.Errorf("closed"))
- cmd.Process.Kill()
- timeout.Stop()
- return
- }
- }
- }()
- return outputC, errorC, nil
-}
diff --git a/vm/odroid/odroid.go b/vm/odroid/odroid.go
index 2277f0548..dcc4135b6 100644
--- a/vm/odroid/odroid.go
+++ b/vm/odroid/odroid.go
@@ -22,23 +22,82 @@ import (
"time"
"unsafe"
+ "github.com/google/syzkaller/pkg/config"
. "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/vm"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/vm/vmimpl"
)
func init() {
- vm.Register("odroid", ctor)
+ vmimpl.Register("odroid", ctor)
+}
+
+type Config struct {
+ Host_Addr string // ip address of the host machine
+ Slave_Addr string // ip address of the Odroid board
+ Console string // console device name (e.g. "/dev/ttyUSB0")
+ Hub_Bus int // host USB bus number for the USB hub
+ Hub_Device int // host USB device number for the USB hub
+ Hub_Port int // port on the USB hub to which Odroid is connected
+ Sshkey string // root ssh key for the image
+}
+
+type Pool struct {
+ env *vmimpl.Env
+ cfg *Config
}
type instance struct {
- cfg *vm.Config
+ cfg *Config
closed chan bool
+ debug bool
+}
+
+func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
+ cfg := &Config{}
+ if err := config.LoadData(env.Config, cfg); err != nil {
+ return nil, err
+ }
+ if cfg.Host_Addr == "" {
+ return nil, fmt.Errorf("config param host_addr is empty")
+ }
+ if cfg.Slave_Addr == "" {
+ return nil, fmt.Errorf("config param slave_addr is empty")
+ }
+ if cfg.Console == "" {
+ return nil, fmt.Errorf("config param console is empty")
+ }
+ if cfg.Hub_Bus == 0 {
+ return nil, fmt.Errorf("config param hub_bus is empty")
+ }
+ if cfg.Hub_Device == 0 {
+ return nil, fmt.Errorf("config param hub_device is empty")
+ }
+ if cfg.Hub_Port == 0 {
+ return nil, fmt.Errorf("config param hub_port is empty")
+ }
+ if _, err := os.Stat(cfg.Sshkey); err != nil {
+ return nil, fmt.Errorf("ssh key '%v' does not exist: %v", cfg.Sshkey, err)
+ }
+ if _, err := os.Stat(cfg.Console); err != nil {
+ return nil, fmt.Errorf("console file '%v' does not exist: %v", cfg.Console, err)
+ }
+ pool := &Pool{
+ cfg: cfg,
+ env: env,
+ }
+ return pool, nil
}
-func ctor(cfg *vm.Config) (vm.Instance, error) {
+func (pool *Pool) Count() int {
+ return 1 // no support for multiple Odroid devices yet
+}
+
+func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
inst := &instance{
- cfg: cfg,
+ cfg: pool.cfg,
closed: make(chan bool),
+ debug: pool.env.Debug,
}
closeInst := inst
defer func() {
@@ -46,9 +105,6 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
closeInst.Close()
}
}()
- if err := validateConfig(cfg); err != nil {
- return nil, err
- }
if err := inst.repair(); err != nil {
return nil, err
}
@@ -63,32 +119,22 @@ func ctor(cfg *vm.Config) (vm.Instance, error) {
return inst, nil
}
-func validateConfig(cfg *vm.Config) error {
- if _, err := os.Stat(cfg.Sshkey); err != nil {
- return fmt.Errorf("ssh key '%v' does not exist: %v", cfg.Sshkey, err)
- }
- if _, err := os.Stat(cfg.OdroidConsole); err != nil {
- return fmt.Errorf("console file '%v' does not exist: %v", cfg.OdroidConsole, err)
- }
- return nil
-}
-
func (inst *instance) Forward(port int) (string, error) {
- return fmt.Sprintf(inst.cfg.OdroidHostAddr+":%v", port), nil
+ return fmt.Sprintf(inst.cfg.Host_Addr+":%v", port), nil
}
func (inst *instance) ssh(command string) ([]byte, error) {
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "executing ssh %+v", command)
}
- rpipe, wpipe, err := vm.LongPipe()
+ rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
return nil, err
}
- args := append(inst.sshArgs("-p"), "root@"+inst.cfg.OdroidSlaveAddr, command)
- if inst.cfg.Debug {
+ args := append(inst.sshArgs("-p"), "root@"+inst.cfg.Slave_Addr, command)
+ if inst.debug {
Logf(0, "running command: ssh %#v", args)
}
cmd := exec.Command("ssh", args...)
@@ -104,7 +150,7 @@ func (inst *instance) ssh(command string) ([]byte, error) {
go func() {
select {
case <-time.After(time.Minute):
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "ssh hanged")
}
cmd.Process.Kill()
@@ -114,13 +160,13 @@ func (inst *instance) ssh(command string) ([]byte, error) {
if err := cmd.Wait(); err != nil {
close(done)
out, _ := ioutil.ReadAll(rpipe)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "ssh failed: %v\n%s", err, out)
}
return nil, fmt.Errorf("ssh %+v failed: %v\n%s", args, err, out)
}
close(done)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "ssh returned")
}
out, _ := ioutil.ReadAll(rpipe)
@@ -196,7 +242,7 @@ func (inst *instance) repair() error {
if err := inst.waitForSsh(10); err == nil {
Logf(1, "odroid: ssh succeeded, shutting down now")
inst.ssh("shutdown now")
- if !vm.SleepInterruptible(20 * time.Second) {
+ if !vmimpl.SleepInterruptible(20 * time.Second) {
return fmt.Errorf("shutdown in progress")
}
} else {
@@ -205,13 +251,13 @@ func (inst *instance) repair() error {
// Hard reset by turning off and back on power on a hub port.
Logf(1, "odroid: hard reset, turning off power")
- if err := switchPortPower(inst.cfg.OdroidHubBus, inst.cfg.OdroidHubDevice, inst.cfg.OdroidHubPort, false); err != nil {
+ if err := switchPortPower(inst.cfg.Hub_Bus, inst.cfg.Hub_Device, inst.cfg.Hub_Port, false); err != nil {
return err
}
- if !vm.SleepInterruptible(5 * time.Second) {
+ if !vmimpl.SleepInterruptible(5 * time.Second) {
return fmt.Errorf("shutdown in progress")
}
- if err := switchPortPower(inst.cfg.OdroidHubBus, inst.cfg.OdroidHubDevice, inst.cfg.OdroidHubPort, true); err != nil {
+ if err := switchPortPower(inst.cfg.Hub_Bus, inst.cfg.Hub_Device, inst.cfg.Hub_Port, true); err != nil {
return err
}
@@ -229,7 +275,7 @@ func (inst *instance) waitForSsh(timeout int) error {
var err error
start := time.Now()
for {
- if !vm.SleepInterruptible(time.Second) {
+ if !vmimpl.SleepInterruptible(time.Second) {
return fmt.Errorf("shutdown in progress")
}
if _, err = inst.ssh("pwd"); err == nil {
@@ -244,15 +290,14 @@ func (inst *instance) waitForSsh(timeout int) error {
func (inst *instance) Close() {
close(inst.closed)
- os.RemoveAll(inst.cfg.Workdir)
}
func (inst *instance) Copy(hostSrc string) (string, error) {
basePath := "/data/"
vmDst := filepath.Join(basePath, filepath.Base(hostSrc))
- args := append(inst.sshArgs("-P"), hostSrc, "root@"+inst.cfg.OdroidSlaveAddr+":"+vmDst)
+ args := append(inst.sshArgs("-P"), hostSrc, "root@"+inst.cfg.Slave_Addr+":"+vmDst)
cmd := exec.Command("scp", args...)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "running command: scp %#v", args)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
@@ -277,19 +322,19 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
}
func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command string) (<-chan []byte, <-chan error, error) {
- tty, err := vm.OpenConsole(inst.cfg.OdroidConsole)
+ tty, err := vmimpl.OpenConsole(inst.cfg.Console)
if err != nil {
return nil, nil, err
}
- rpipe, wpipe, err := vm.LongPipe()
+ rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
tty.Close()
return nil, nil, err
}
- args := append(inst.sshArgs("-p"), "root@"+inst.cfg.OdroidSlaveAddr, "cd /data; "+command)
- if inst.cfg.Debug {
+ args := append(inst.sshArgs("-p"), "root@"+inst.cfg.Slave_Addr, "cd /data; "+command)
+ if inst.debug {
Logf(0, "running command: ssh %#v", args)
}
cmd := exec.Command("ssh", args...)
@@ -304,10 +349,10 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
wpipe.Close()
var tee io.Writer
- if inst.cfg.Debug {
+ if inst.debug {
tee = os.Stdout
}
- merger := vm.NewOutputMerger(tee)
+ merger := vmimpl.NewOutputMerger(tee)
merger.Add("console", tty)
merger.Add("ssh", rpipe)
@@ -322,11 +367,11 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
go func() {
select {
case <-time.After(timeout):
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-stop:
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-inst.closed:
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "instance closed")
}
signal(fmt.Errorf("instance closed"))
@@ -363,7 +408,7 @@ func (inst *instance) sshArgs(portArg string) []string {
"-o", "StrictHostKeyChecking=no",
"-o", "LogLevel=error",
}
- if inst.cfg.Debug {
+ if inst.debug {
args = append(args, "-v")
}
return args
diff --git a/vm/qemu/qemu.go b/vm/qemu/qemu.go
index b0d677685..17e7ce252 100644
--- a/vm/qemu/qemu.go
+++ b/vm/qemu/qemu.go
@@ -16,8 +16,10 @@ import (
"strings"
"time"
+ "github.com/google/syzkaller/pkg/config"
. "github.com/google/syzkaller/pkg/log"
- "github.com/google/syzkaller/vm"
+ "github.com/google/syzkaller/pkg/osutil"
+ "github.com/google/syzkaller/vm/vmimpl"
)
const (
@@ -25,60 +27,132 @@ const (
)
func init() {
- vm.Register("qemu", ctor)
+ vmimpl.Register("qemu", ctor)
+}
+
+type Config struct {
+ Count int // number of VMs to use
+ Qemu string // qemu binary name (qemu-system-x86_64 by default)
+ Qemu_Args string // additional command line arguments for qemu binary
+ Kernel string // e.g. arch/x86/boot/bzImage
+ Cmdline string // kernel command line (can only be specified with kernel)
+ Initrd string // linux initial ramdisk. (optional)
+ Cpu int // number of VM CPUs
+ Mem int // amount of VM memory in MBs
+ Sshkey string // root ssh key for the image
+}
+
+type Pool struct {
+ env *vmimpl.Env
+ cfg *Config
}
type instance struct {
- cfg *vm.Config
+ cfg *Config
+ image string
+ debug bool
+ workdir string
+ sshkey string
port int
rpipe io.ReadCloser
wpipe io.WriteCloser
qemu *exec.Cmd
waiterC chan error
- merger *vm.OutputMerger
+ merger *vmimpl.OutputMerger
+}
+
+func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
+ cfg := &Config{
+ Count: 1,
+ Qemu: "qemu-system-x86_64",
+ }
+ if err := config.LoadData(env.Config, cfg); err != nil {
+ return nil, err
+ }
+ if cfg.Count < 1 || cfg.Count > 1000 {
+ return nil, fmt.Errorf("invalid config param count: %v, want [1, 1000]", cfg.Count)
+ }
+ if env.Debug {
+ cfg.Count = 1
+ }
+ if _, err := exec.LookPath(cfg.Qemu); err != nil {
+ return nil, err
+ }
+ if env.Image == "9p" {
+ if cfg.Kernel == "" {
+ return nil, fmt.Errorf("9p image requires kernel")
+ }
+ } else {
+ if _, err := os.Stat(env.Image); err != nil {
+ return nil, fmt.Errorf("image file '%v' does not exist: %v", env.Image, err)
+ }
+ if _, err := os.Stat(cfg.Sshkey); err != nil {
+ return nil, fmt.Errorf("ssh key '%v' does not exist: %v", cfg.Sshkey, err)
+ }
+ }
+ if cfg.Cpu <= 0 || cfg.Cpu > 1024 {
+ return nil, fmt.Errorf("bad qemu cpu: %v, want [1-1024]", cfg.Cpu)
+ }
+ if cfg.Mem < 128 || cfg.Mem > 1048576 {
+ return nil, fmt.Errorf("bad qemu mem: %v, want [128-1048576]", cfg.Mem)
+ }
+ cfg.Kernel = osutil.Abs(cfg.Kernel)
+ cfg.Initrd = osutil.Abs(cfg.Initrd)
+ cfg.Sshkey = osutil.Abs(cfg.Sshkey)
+ pool := &Pool{
+ cfg: cfg,
+ env: env,
+ }
+ return pool, nil
+}
+
+func (pool *Pool) Count() int {
+ return pool.cfg.Count
}
-func ctor(cfg *vm.Config) (vm.Instance, error) {
+func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
+ sshkey := pool.cfg.Sshkey
+ if pool.env.Image == "9p" {
+ sshkey = filepath.Join(workdir, "key")
+ keygen := exec.Command("ssh-keygen", "-t", "rsa", "-b", "2048", "-N", "", "-C", "", "-f", sshkey)
+ if out, err := keygen.CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("failed to execute ssh-keygen: %v\n%s", err, out)
+ }
+ initFile := filepath.Join(workdir, "init.sh")
+ if err := ioutil.WriteFile(initFile, []byte(strings.Replace(initScript, "{{KEY}}", sshkey, -1)), 0777); err != nil {
+ return nil, fmt.Errorf("failed to create init file: %v", err)
+ }
+ }
+
for i := 0; ; i++ {
- inst, err := ctorImpl(cfg)
+ inst, err := pool.ctor(workdir, sshkey, index)
if err == nil {
return inst, nil
}
if i < 1000 && strings.Contains(err.Error(), "could not set up host forwarding rule") {
continue
}
- os.RemoveAll(cfg.Workdir)
return nil, err
}
}
-func ctorImpl(cfg *vm.Config) (vm.Instance, error) {
- inst := &instance{cfg: cfg}
+func (pool *Pool) ctor(workdir, sshkey string, index int) (vmimpl.Instance, error) {
+ inst := &instance{
+ cfg: pool.cfg,
+ image: pool.env.Image,
+ debug: pool.env.Debug,
+ workdir: workdir,
+ sshkey: sshkey,
+ }
closeInst := inst
defer func() {
if closeInst != nil {
- closeInst.close(false)
+ closeInst.Close()
}
}()
- if err := validateConfig(cfg); err != nil {
- return nil, err
- }
-
- if cfg.Image == "9p" {
- inst.cfg.Sshkey = filepath.Join(inst.cfg.Workdir, "key")
- keygen := exec.Command("ssh-keygen", "-t", "rsa", "-b", "2048", "-N", "", "-C", "", "-f", inst.cfg.Sshkey)
- if out, err := keygen.CombinedOutput(); err != nil {
- return nil, fmt.Errorf("failed to execute ssh-keygen: %v\n%s", err, out)
- }
- initFile := filepath.Join(cfg.Workdir, "init.sh")
- if err := ioutil.WriteFile(initFile, []byte(strings.Replace(initScript, "{{KEY}}", inst.cfg.Sshkey, -1)), 0777); err != nil {
- return nil, fmt.Errorf("failed to create init file: %v", err)
- }
- }
-
var err error
- inst.rpipe, inst.wpipe, err = vm.LongPipe()
+ inst.rpipe, inst.wpipe, err = osutil.LongPipe()
if err != nil {
return nil, err
}
@@ -91,39 +165,7 @@ func ctorImpl(cfg *vm.Config) (vm.Instance, error) {
return inst, nil
}
-func validateConfig(cfg *vm.Config) error {
- if cfg.Bin == "" {
- cfg.Bin = "qemu-system-x86_64"
- }
- if _, err := exec.LookPath(cfg.Bin); err != nil {
- return err
- }
- if cfg.Image == "9p" {
- if cfg.Kernel == "" {
- return fmt.Errorf("9p image requires kernel")
- }
- } else {
- if _, err := os.Stat(cfg.Image); err != nil {
- return fmt.Errorf("image file '%v' does not exist: %v", cfg.Image, err)
- }
- if _, err := os.Stat(cfg.Sshkey); err != nil {
- return fmt.Errorf("ssh key '%v' does not exist: %v", cfg.Sshkey, err)
- }
- }
- if cfg.Cpu <= 0 || cfg.Cpu > 1024 {
- return fmt.Errorf("bad qemu cpu: %v, want [1-1024]", cfg.Cpu)
- }
- if cfg.Mem < 128 || cfg.Mem > 1048576 {
- return fmt.Errorf("bad qemu mem: %v, want [128-1048576]", cfg.Mem)
- }
- return nil
-}
-
func (inst *instance) Close() {
- inst.close(true)
-}
-
-func (inst *instance) close(removeWorkDir bool) {
if inst.qemu != nil {
inst.qemu.Process.Kill()
err := <-inst.waiterC
@@ -138,10 +180,6 @@ func (inst *instance) close(removeWorkDir bool) {
if inst.wpipe != nil {
inst.wpipe.Close()
}
- os.Remove(filepath.Join(inst.cfg.Workdir, "key"))
- if removeWorkDir {
- os.RemoveAll(inst.cfg.Workdir)
- }
}
func (inst *instance) Boot() error {
@@ -165,7 +203,7 @@ func (inst *instance) Boot() error {
"-numa", "node,nodeid=0,cpus=0-1", "-numa", "node,nodeid=1,cpus=2-3",
"-smp", "sockets=2,cores=2,threads=1",
}
- if inst.cfg.BinArgs == "" {
+ if inst.cfg.Qemu_Args == "" {
// This is reasonable defaults for x86 kvm-enabled host.
args = append(args,
"-enable-kvm",
@@ -173,16 +211,16 @@ func (inst *instance) Boot() error {
"-soundhw", "all",
)
} else {
- args = append(args, strings.Split(inst.cfg.BinArgs, " ")...)
+ args = append(args, strings.Split(inst.cfg.Qemu_Args, " ")...)
}
- if inst.cfg.Image == "9p" {
+ if inst.image == "9p" {
args = append(args,
"-fsdev", "local,id=fsdev0,path=/,security_model=none,readonly",
"-device", "virtio-9p-pci,fsdev=fsdev0,mount_tag=/dev/root",
)
} else {
args = append(args,
- "-hda", inst.cfg.Image,
+ "-hda", inst.image,
"-snapshot",
)
}
@@ -198,9 +236,9 @@ func (inst *instance) Boot() error {
" kvm-intel.ept=1 kvm-intel.flexpriority=1 " +
" kvm-intel.vpid=1 kvm-intel.emulate_invalid_guest_state=1 kvm-intel.eptad=1 " +
" kvm-intel.enable_shadow_vmcs=1 kvm-intel.pml=1 kvm-intel.enable_apicv=1 "
- if inst.cfg.Image == "9p" {
+ if inst.image == "9p" {
cmdline += "root=/dev/root rootfstype=9p rootflags=trans=virtio,version=9p2000.L,cache=loose "
- cmdline += "init=" + filepath.Join(inst.cfg.Workdir, "init.sh") + " "
+ cmdline += "init=" + filepath.Join(inst.workdir, "init.sh") + " "
} else {
cmdline += "root=/dev/sda "
}
@@ -209,14 +247,14 @@ func (inst *instance) Boot() error {
"-append", cmdline+inst.cfg.Cmdline,
)
}
- if inst.cfg.Debug {
- Logf(0, "running command: %v %#v", inst.cfg.Bin, args)
+ if inst.debug {
+ Logf(0, "running command: %v %#v", inst.cfg.Qemu, args)
}
- qemu := exec.Command(inst.cfg.Bin, args...)
+ qemu := exec.Command(inst.cfg.Qemu, args...)
qemu.Stdout = inst.wpipe
qemu.Stderr = inst.wpipe
if err := qemu.Start(); err != nil {
- return fmt.Errorf("failed to start %v %+v: %v", inst.cfg.Bin, args, err)
+ return fmt.Errorf("failed to start %v %+v: %v", inst.cfg.Qemu, args, err)
}
inst.wpipe.Close()
inst.wpipe = nil
@@ -225,10 +263,10 @@ func (inst *instance) Boot() error {
// Start output merger.
var tee io.Writer
- if inst.cfg.Debug {
+ if inst.debug {
tee = os.Stdout
}
- inst.merger = vm.NewOutputMerger(tee)
+ inst.merger = vmimpl.NewOutputMerger(tee)
inst.merger.Add("qemu", inst.rpipe)
inst.rpipe = nil
@@ -293,13 +331,13 @@ func (inst *instance) Forward(port int) (string, error) {
func (inst *instance) Copy(hostSrc string) (string, error) {
basePath := "/"
- if inst.cfg.Image == "9p" {
+ if inst.image == "9p" {
basePath = "/tmp"
}
vmDst := filepath.Join(basePath, filepath.Base(hostSrc))
args := append(inst.sshArgs("-P"), hostSrc, "root@localhost:"+vmDst)
cmd := exec.Command("scp", args...)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "running command: scp %#v", args)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
@@ -324,14 +362,14 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
}
func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command string) (<-chan []byte, <-chan error, error) {
- rpipe, wpipe, err := vm.LongPipe()
+ rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
return nil, nil, err
}
inst.merger.Add("ssh", rpipe)
args := append(inst.sshArgs("-p"), "root@localhost", command)
- if inst.cfg.Debug {
+ if inst.debug {
Logf(0, "running command: ssh %#v", args)
}
cmd := exec.Command("ssh", args...)
@@ -353,9 +391,9 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
go func() {
select {
case <-time.After(timeout):
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case <-stop:
- signal(vm.TimeoutErr)
+ signal(vmimpl.TimeoutErr)
case err := <-inst.merger.Err:
cmd.Process.Kill()
if cmdErr := cmd.Wait(); cmdErr == nil {
@@ -374,7 +412,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
func (inst *instance) sshArgs(portArg string) []string {
args := []string{
- "-i", inst.cfg.Sshkey,
+ "-i", inst.sshkey,
portArg, strconv.Itoa(inst.port),
"-F", "/dev/null",
"-o", "ConnectionAttempts=10",
@@ -385,7 +423,7 @@ func (inst *instance) sshArgs(portArg string) []string {
"-o", "StrictHostKeyChecking=no",
"-o", "LogLevel=error",
}
- if inst.cfg.Debug {
+ if inst.debug {
args = append(args, "-v")
}
return args
diff --git a/vm/vm.go b/vm/vm.go
index 2140dc5cf..177c62057 100644
--- a/vm/vm.go
+++ b/vm/vm.go
@@ -1,99 +1,102 @@
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+// Package vm provides an abstract test machine (VM, physical machine, etc)
+// interface for the rest of the system.
+// For convenience test machines are subsequently collectively called VMs.
+// Package wraps vmimpl package interface with some common functionality
+// and higher-level interface.
package vm
import (
"bytes"
- "errors"
"fmt"
- "io"
"os"
"regexp"
- "syscall"
"time"
+ "github.com/google/syzkaller/pkg/fileutil"
"github.com/google/syzkaller/report"
-)
-
-// Instance represents a Linux VM or a remote physical machine.
-type Instance interface {
- // Copy copies a hostSrc file into vm and returns file name in vm.
- Copy(hostSrc string) (string, error)
-
- // Forward setups forwarding from within VM to host port port
- // and returns address to use in VM.
- Forward(port int) (string, error)
+ "github.com/google/syzkaller/vm/vmimpl"
- // Run runs cmd inside of the VM (think of ssh cmd).
- // outc receives combined cmd and kernel console output.
- // errc receives either command Wait return error or vm.TimeoutErr.
- // Command is terminated after timeout. Send on the stop chan can be used to terminate it earlier.
- Run(timeout time.Duration, stop <-chan bool, command string) (outc <-chan []byte, errc <-chan error, err error)
+ _ "github.com/google/syzkaller/vm/adb"
+ _ "github.com/google/syzkaller/vm/gce"
+ _ "github.com/google/syzkaller/vm/kvm"
+ _ "github.com/google/syzkaller/vm/odroid"
+ _ "github.com/google/syzkaller/vm/qemu"
+)
- // Close stops and destroys the VM.
- Close()
+type Pool struct {
+ impl vmimpl.Pool
+ workdir string
}
-type Config struct {
- Name string
- Index int
- Workdir string
- Bin string
- BinArgs string
- Initrd string
- Kernel string
- Cmdline string
- Image string
- Sshkey string
- Executor string
- Device string
- MachineType string
- OdroidHostAddr string
- OdroidSlaveAddr string
- OdroidConsole string
- OdroidHubBus int
- OdroidHubDevice int
- OdroidHubPort int
- Cpu int
- Mem int
- Debug bool
+type Instance struct {
+ impl vmimpl.Instance
+ workdir string
+ index int
}
-type ctorFunc func(cfg *Config) (Instance, error)
+type Env vmimpl.Env
-var ctors = make(map[string]ctorFunc)
+var (
+ Shutdown = vmimpl.Shutdown
+ TimeoutErr = vmimpl.TimeoutErr
+)
-func Register(typ string, ctor ctorFunc) {
- ctors[typ] = ctor
+func Create(typ string, env *Env) (*Pool, error) {
+ impl, err := vmimpl.Create(typ, (*vmimpl.Env)(env))
+ if err != nil {
+ return nil, err
+ }
+ return &Pool{
+ impl: impl,
+ workdir: env.Workdir,
+ }, nil
}
-// Close to interrupt all pending operations.
-var Shutdown = make(chan struct{})
-
-// Create creates and boots a new VM instance.
-func Create(typ string, cfg *Config) (Instance, error) {
- ctor := ctors[typ]
- if ctor == nil {
- return nil, fmt.Errorf("unknown instance type '%v'", typ)
- }
- return ctor(cfg)
+func (pool *Pool) Count() int {
+ return pool.impl.Count()
}
-func LongPipe() (io.ReadCloser, io.WriteCloser, error) {
- r, w, err := os.Pipe()
+func (pool *Pool) Create(index int) (*Instance, error) {
+ if index < 0 || index >= pool.Count() {
+ return nil, fmt.Errorf("invalid VM index %v (count %v)", index, pool.Count())
+ }
+ workdir, err := fileutil.ProcessTempDir(pool.workdir)
if err != nil {
- return nil, nil, fmt.Errorf("failed to create pipe: %v", err)
+ return nil, fmt.Errorf("failed to create instance temp dir: %v", err)
}
- for sz := 128 << 10; sz <= 2<<20; sz *= 2 {
- syscall.Syscall(syscall.SYS_FCNTL, w.Fd(), syscall.F_SETPIPE_SZ, uintptr(sz))
+ impl, err := pool.impl.Create(workdir, index)
+ if err != nil {
+ os.RemoveAll(workdir)
+ return nil, err
}
- return r, w, err
+ return &Instance{
+ impl: impl,
+ workdir: workdir,
+ index: index,
+ }, nil
}
-var TimeoutErr = errors.New("timeout")
+func (inst *Instance) Copy(hostSrc string) (string, error) {
+ return inst.impl.Copy(hostSrc)
+}
+
+func (inst *Instance) Forward(port int) (string, error) {
+ return inst.impl.Forward(port)
+}
-func MonitorExecution(outc <-chan []byte, errc <-chan error, local, needOutput bool, ignores []*regexp.Regexp) (desc string, text, output []byte, crashed, timedout bool) {
+func (inst *Instance) Run(timeout time.Duration, stop <-chan bool, command string) (outc <-chan []byte, errc <-chan error, err error) {
+ return inst.impl.Run(timeout, stop, command)
+}
+
+func (inst *Instance) Close() {
+ inst.impl.Close()
+ os.RemoveAll(inst.workdir)
+}
+
+func MonitorExecution(outc <-chan []byte, errc <-chan error, needOutput bool, ignores []*regexp.Regexp) (desc string, text, output []byte, crashed, timedout bool) {
waitForOutput := func() {
dur := time.Second
if needOutput {
@@ -183,27 +186,14 @@ func MonitorExecution(outc <-chan []byte, errc <-chan error, local, needOutput b
}
// In some cases kernel constantly prints something to console,
// but fuzzer is not actually executing programs.
- if !local && time.Since(lastExecuteTime) > 3*time.Minute {
+ if time.Since(lastExecuteTime) > 3*time.Minute {
return "test machine is not executing programs", nil, output, true, false
}
case <-ticker.C:
tickerFired = true
- if !local {
- return "no output from test machine", nil, output, true, false
- }
+ return "no output from test machine", nil, output, true, false
case <-Shutdown:
return "", nil, nil, false, false
}
}
}
-
-// Sleep for d.
-// If shutdown is in progress, return false prematurely.
-func SleepInterruptible(d time.Duration) bool {
- select {
- case <-time.After(d):
- return true
- case <-Shutdown:
- return false
- }
-}
diff --git a/vm/console.go b/vm/vmimpl/console.go
index 6dd6a6edf..f4a0b71d1 100644
--- a/vm/console.go
+++ b/vm/vmimpl/console.go
@@ -3,7 +3,7 @@
// +build !ppc64le
-package vm
+package vmimpl
import (
"fmt"
@@ -13,6 +13,7 @@ import (
"syscall"
"unsafe"
+ "github.com/google/syzkaller/pkg/osutil"
"golang.org/x/sys/unix"
)
@@ -79,7 +80,7 @@ func (t *tty) Close() error {
// OpenAdbConsole provides fallback console output using 'adb shell dmesg -w'.
func OpenAdbConsole(bin, dev string) (rc io.ReadCloser, err error) {
- rpipe, wpipe, err := LongPipe()
+ rpipe, wpipe, err := osutil.LongPipe()
if err != nil {
return nil, err
}
diff --git a/vm/merger.go b/vm/vmimpl/merger.go
index 2902eda6c..17b837602 100644
--- a/vm/merger.go
+++ b/vm/vmimpl/merger.go
@@ -1,7 +1,7 @@
// Copyright 2016 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-package vm
+package vmimpl
import (
"bytes"
diff --git a/vm/merger_test.go b/vm/vmimpl/merger_test.go
index 35da35284..335dcb228 100644
--- a/vm/merger_test.go
+++ b/vm/vmimpl/merger_test.go
@@ -1,26 +1,28 @@
// Copyright 2016 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-package vm
+package vmimpl
import (
"bytes"
"testing"
"time"
+
+ "github.com/google/syzkaller/pkg/osutil"
)
func TestMerger(t *testing.T) {
tee := new(bytes.Buffer)
merger := NewOutputMerger(tee)
- rp1, wp1, err := LongPipe()
+ rp1, wp1, err := osutil.LongPipe()
if err != nil {
t.Fatal(err)
}
defer wp1.Close()
merger.Add("pipe1", rp1)
- rp2, wp2, err := LongPipe()
+ rp2, wp2, err := osutil.LongPipe()
if err != nil {
t.Fatal(err)
}
diff --git a/vm/vmimpl/util.go b/vm/vmimpl/util.go
new file mode 100644
index 000000000..f80de8ab1
--- /dev/null
+++ b/vm/vmimpl/util.go
@@ -0,0 +1,19 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package vmimpl
+
+import (
+ "time"
+)
+
+// Sleep for d.
+// If shutdown is in progress, return false prematurely.
+func SleepInterruptible(d time.Duration) bool {
+ select {
+ case <-time.After(d):
+ return true
+ case <-Shutdown:
+ return false
+ }
+}
diff --git a/vm/vmimpl/vmimpl.go b/vm/vmimpl/vmimpl.go
new file mode 100644
index 000000000..4c542429c
--- /dev/null
+++ b/vm/vmimpl/vmimpl.go
@@ -0,0 +1,77 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// Package vmimpl provides an abstract test machine (VM, physical machine, etc)
+// interface for the rest of the system. For convenience test machines are subsequently
+// collectively called VMs.
+// The package also provides various utility functions for VM implementations.
+package vmimpl
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+// Pool represents a set of test machines (VMs, physical devices, etc) of particular type.
+type Pool interface {
+ // Count returns total number of VMs in the pool.
+ Count() int
+
+ // Create creates and boots a new VM instance.
+ Create(workdir string, index int) (Instance, error)
+}
+
+// Instance represents a single VM.
+type Instance interface {
+ // Copy copies a hostSrc file into VM and returns file name in VM.
+ Copy(hostSrc string) (string, error)
+
+ // Forward sets up forwarding from within the VM to the given host port
+ // and returns address to use in VM.
+ Forward(port int) (string, error)
+
+ // Run runs cmd inside of the VM (think of ssh cmd).
+ // outc receives combined cmd and kernel console output.
+ // errc receives either the error returned by command Wait or vmimpl.TimeoutErr.
+ // Command is terminated after timeout. Send on the stop chan can be used to terminate it earlier.
+ Run(timeout time.Duration, stop <-chan bool, command string) (outc <-chan []byte, errc <-chan error, err error)
+
+ // Close stops and destroys the VM.
+ Close()
+}
+
+// Env contains global constant parameters for a pool of VMs.
+type Env struct {
+ // Unique name
+ // Can be used for VM name collision resolution if several pools share global name space.
+ Name string
+ Workdir string
+ Image string
+ Debug bool
+ Config []byte // json-serialized VM-type-specific config
+}
+
+// Create creates a VM type that can be used to create individual VMs.
+func Create(typ string, env *Env) (Pool, error) {
+ ctor := ctors[typ]
+ if ctor == nil {
+ return nil, fmt.Errorf("unknown instance type '%v'", typ)
+ }
+ return ctor(env)
+}
+
+// Register registers a new VM type within the package.
+func Register(typ string, ctor ctorFunc) {
+ ctors[typ] = ctor
+}
+
+var (
+ // Close to interrupt all pending operations in all VMs.
+ Shutdown = make(chan struct{})
+ TimeoutErr = errors.New("timeout")
+
+ ctors = make(map[string]ctorFunc)
+)
+
+type ctorFunc func(env *Env) (Pool, error)