aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2015-12-23 19:11:29 +0100
committerDmitry Vyukov <dvyukov@google.com>2015-12-23 19:12:45 +0100
commit2eb388c0f81bfc3acf0c147654905c6d5c03df0d (patch)
treeb3aeb76cbe663966ce5f67a5d539623df73b2361
parent5c0a66219885185a6ef6a938fc157242706f042a (diff)
vm: improve VM interface
The current interface is suitable only for running syz-fuzzer. Make the interface more generic (boot, copy file, run an arbitrary command). This allows building other tools on top of the vm package (e.g. reproducer creation).
-rw-r--r--config/config.go200
-rw-r--r--fileutil/fileutil.go52
-rw-r--r--syz-fuzzer/fuzzer.go1
-rw-r--r--syz-manager/main.go189
-rw-r--r--syz-manager/manager.go213
-rw-r--r--vm/kvm/kvm.go536
-rw-r--r--vm/local/local.go110
-rw-r--r--vm/qemu/qemu.go636
-rw-r--r--vm/vm.go46
9 files changed, 887 insertions, 1096 deletions
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 000000000..e415217e8
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,200 @@
+// Copyright 2015 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package config
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/google/syzkaller/fileutil"
+ "github.com/google/syzkaller/sys"
+ "github.com/google/syzkaller/vm"
+)
+
+type Config struct {
+ Http string
+ Workdir string
+ Vmlinux string
+ Kernel string // e.g. arch/x86/boot/bzImage
+ Cmdline string // kernel command line
+ Image string // linux image for VMs
+ Cpu int // number of VM CPUs
+ Mem int // amount of VM memory in MBs
+ Sshkey string // root ssh key for the image
+ Port int // VM ssh port to use
+ Bin string // qemu/lkvm binary name
+ Debug bool // dump all VM output to console
+
+ Syzkaller string // path to syzkaller checkout (syz-manager will look for binaries in bin subdir)
+ Type string // VM type (qemu, kvm, local)
+ Count int // number of VMs
+ Procs int // number of parallel processes inside of every VM
+
+ NoCover bool
+ NoDropPrivs bool
+ Leak bool // do memory leak checking
+
+ Enable_Syscalls []string
+ Disable_Syscalls []string
+ Suppressions []string
+}
+
+func Parse(filename string) (*Config, string, []*regexp.Regexp, error) {
+ if filename == "" {
+ return nil, "", nil, fmt.Errorf("supply config in -config flag")
+ }
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, "", nil, fmt.Errorf("failed to read config file: %v", err)
+ }
+ cfg := new(Config)
+ if err := json.Unmarshal(data, cfg); err != nil {
+ return nil, "", nil, fmt.Errorf("failed to parse config file: %v", err)
+ }
+ if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-fuzzer")); err != nil {
+ return nil, "", nil, fmt.Errorf("bad config syzkaller param: can't find bin/syz-fuzzer")
+ }
+ if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-executor")); err != nil {
+ return nil, "", nil, fmt.Errorf("bad config syzkaller param: can't find bin/syz-executor")
+ }
+ if _, err := os.Stat(filepath.Join(cfg.Syzkaller, "bin/syz-execprog")); err != nil {
+ return nil, "", nil, fmt.Errorf("bad config syzkaller param: can't find bin/syz-execprog")
+ }
+ if cfg.Http == "" {
+ return nil, "", nil, fmt.Errorf("config param http is empty")
+ }
+ if cfg.Workdir == "" {
+ return nil, "", nil, fmt.Errorf("config param workdir is empty")
+ }
+ if cfg.Vmlinux == "" {
+ return nil, "", nil, fmt.Errorf("config param vmlinux is empty")
+ }
+ if cfg.Type == "" {
+ return nil, "", nil, fmt.Errorf("config param type is empty")
+ }
+ if cfg.Count <= 0 || cfg.Count > 1000 {
+ return nil, "", nil, fmt.Errorf("invalid config param count: %v, want (1, 1000]", cfg.Count)
+ }
+ if cfg.Procs <= 0 {
+ cfg.Procs = 1
+ }
+
+ syscalls, err := parseSyscalls(cfg)
+ if err != nil {
+ return nil, "", nil, err
+ }
+
+ suppressions, err := parseSuppressions(cfg)
+ if err != nil {
+ return nil, "", nil, err
+ }
+
+ return cfg, syscalls, suppressions, nil
+}
+
+func parseSyscalls(cfg *Config) (string, error) {
+ if len(cfg.Enable_Syscalls) == 0 && len(cfg.Disable_Syscalls) == 0 {
+ return "", nil
+ }
+
+ match := func(call *sys.Call, str string) bool {
+ if str == call.CallName || str == call.Name {
+ return true
+ }
+ if len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {
+ return true
+ }
+ return false
+ }
+
+ syscalls := make(map[int]bool)
+ if len(cfg.Enable_Syscalls) != 0 {
+ for _, c := range cfg.Enable_Syscalls {
+ n := 0
+ for _, call := range sys.Calls {
+ if match(call, c) {
+ syscalls[call.ID] = true
+ n++
+ }
+ }
+ if n == 0 {
+ return "", fmt.Errorf("unknown enabled syscall: %v", c)
+ }
+ }
+ } else {
+ for _, call := range sys.Calls {
+ syscalls[call.ID] = true
+ }
+ }
+ for _, c := range cfg.Disable_Syscalls {
+ n := 0
+ for _, call := range sys.Calls {
+ if match(call, c) {
+ delete(syscalls, call.ID)
+ n++
+ }
+ }
+ if n == 0 {
+ return "", fmt.Errorf("unknown disabled syscall: %v", c)
+ }
+ }
+ // They will be generated anyway.
+ syscalls[sys.CallMap["mmap"].ID] = true
+ syscalls[sys.CallMap["clock_gettime"].ID] = true
+
+ buf := new(bytes.Buffer)
+ for c := range syscalls {
+ fmt.Fprintf(buf, ",%v", c)
+ }
+ return buf.String()[1:], nil
+}
+
+func parseSuppressions(cfg *Config) ([]*regexp.Regexp, error) {
+ // Add some builtin suppressions.
+ supp := append(cfg.Suppressions, []string{
+ "panic: failed to start executor binary",
+ "panic: executor failed: pthread_create failed",
+ "panic: failed to create temp dir",
+ "fatal error: runtime: out of memory",
+ "Out of memory: Kill process .* \\(syzkaller_fuzze\\)",
+ "WARNING: KASAN doesn't support memory hot-add",
+ }...)
+ var suppressions []*regexp.Regexp
+ for _, s := range supp {
+ re, err := regexp.Compile(s)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile suppression '%v': %v", s, err)
+ }
+ suppressions = append(suppressions, re)
+ }
+
+ return suppressions, nil
+}
+
+func CreateVMConfig(cfg *Config) (*vm.Config, error) {
+ workdir, index, err := fileutil.ProcessTempDir(cfg.Workdir)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create instance temp dir: %v", err)
+ }
+ vmCfg := &vm.Config{
+ Name: fmt.Sprintf("%v-%v", cfg.Type, index),
+ Index: index,
+ Workdir: workdir,
+ Bin: cfg.Bin,
+ Kernel: cfg.Kernel,
+ Cmdline: cfg.Cmdline,
+ Image: cfg.Image,
+ Sshkey: cfg.Sshkey,
+ Cpu: cfg.Cpu,
+ Mem: cfg.Mem,
+ Debug: cfg.Debug,
+ }
+ return vmCfg, nil
+}
diff --git a/fileutil/fileutil.go b/fileutil/fileutil.go
index 5945315cb..34e2562fa 100644
--- a/fileutil/fileutil.go
+++ b/fileutil/fileutil.go
@@ -8,9 +8,22 @@ import (
"io"
"io/ioutil"
"os"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "syscall"
)
-func CopyFile(oldFile, newFile string) error {
+var copyMu sync.Mutex
+
+// CopyFile copies oldFile to newFile, potentially serializing with other
+// file copies (for large files).
+func CopyFile(oldFile, newFile string, serial bool) error {
+ if serial {
+ copyMu.Lock()
+ defer copyMu.Unlock()
+ }
+
oldf, err := os.Open(oldFile)
if err != nil {
return err
@@ -42,3 +55,40 @@ func WriteTempFile(data []byte) (string, error) {
f.Close()
return f.Name(), nil
}
+
+// ProcessTempDir creates a new temp dir in the where directory and returns its path and a unique index.
+// It also cleans up old, unused temp dirs after dead processes.
+func ProcessTempDir(where string) (string, int, error) {
+ for i := 0; i < 1e4; i++ {
+ path := filepath.Join(where, fmt.Sprintf("instance-%v", i))
+ pidfile := filepath.Join(path, ".pid")
+ err := os.Mkdir(path, 0700)
+ if os.IsExist(err) {
+ // Try to clean up.
+ data, err := ioutil.ReadFile(pidfile)
+ if err == nil {
+ pid, err := strconv.Atoi(string(data))
+ if err == nil && pid > 1 {
+ if err := syscall.Kill(pid, 0); err == syscall.ESRCH {
+ if os.Remove(pidfile) == nil {
+ if os.RemoveAll(path) == nil {
+ i--
+ continue
+ }
+ }
+ }
+ }
+ }
+ // If err != nil, assume that the pid file is not created yet.
+ continue
+ }
+ if err != nil {
+ return "", 0, err
+ }
+ if err := ioutil.WriteFile(pidfile, []byte(strconv.Itoa(syscall.Getpid())), 0600); err != nil {
+ return "", 0, err
+ }
+ return path, i, nil
+ }
+ return "", 0, fmt.Errorf("too many live instances")
+}
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
index 002052736..511758612 100644
--- a/syz-fuzzer/fuzzer.go
+++ b/syz-fuzzer/fuzzer.go
@@ -108,6 +108,7 @@ func main() {
maxCover = make([]cover.Cover, sys.CallCount)
corpusHashes = make(map[Sig]struct{})
+ logf(0, "dialing manager at %v", *flagManager)
conn, err := rpc.Dial("tcp", *flagManager)
if err != nil {
panic(err)
diff --git a/syz-manager/main.go b/syz-manager/main.go
deleted file mode 100644
index acbba0967..000000000
--- a/syz-manager/main.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "regexp"
- "strings"
-
- "github.com/google/syzkaller/sys"
- "github.com/google/syzkaller/vm"
- _ "github.com/google/syzkaller/vm/kvm"
- _ "github.com/google/syzkaller/vm/local"
- _ "github.com/google/syzkaller/vm/qemu"
-)
-
-var (
- flagConfig = flag.String("config", "", "configuration file")
- flagV = flag.Int("v", 0, "verbosity")
-)
-
-type Config struct {
- Http string
- Workdir string
- Vmlinux string
- Type string
- Count int // number of VMs
- Procs int // number of parallel processes inside of every VM
- Port int
- Nocover bool
- NoDropPrivs bool
- Leak bool // do memory leak checking
- Params map[string]interface{}
- Enable_Syscalls []string
- Disable_Syscalls []string
- Suppressions []string
-}
-
-func main() {
- flag.Parse()
- cfg, syscalls := parseConfig()
- params, err := json.Marshal(cfg.Params)
- if err != nil {
- fatalf("failed to marshal config params: %v", err)
- }
- enabledSyscalls := ""
- if len(syscalls) != 0 {
- buf := new(bytes.Buffer)
- for c := range syscalls {
- fmt.Fprintf(buf, ",%v", c)
- }
- enabledSyscalls = buf.String()[1:]
- logf(1, "enabled syscalls: %v", enabledSyscalls)
- }
- vmCfg := &vm.Config{
- Workdir: cfg.Workdir,
- ManagerPort: cfg.Port,
- Params: params,
- EnabledSyscalls: enabledSyscalls,
- NoCover: cfg.Nocover,
- NoDropPrivs: cfg.NoDropPrivs,
- Leak: cfg.Leak,
- Procs: cfg.Procs,
- }
-
- // Add some builtin suppressions.
- cfg.Suppressions = append(cfg.Suppressions, []string{
- "panic: failed to start executor binary",
- "panic: executor failed: pthread_create failed",
- "panic: failed to create temp dir",
- "Out of memory: Kill process .* \\(syzkaller_fuzze\\)",
- "WARNING: KASAN doesn't support memory hot-add",
- }...)
- for _, s := range cfg.Suppressions {
- re, err := regexp.Compile(s)
- if err != nil {
- fatalf("failed to compile suppression '%v': %v", s, err)
- }
- vmCfg.Suppressions = append(vmCfg.Suppressions, re)
- }
-
- var instances []vm.Instance
- for i := 0; i < cfg.Count; i++ {
- inst, err := vm.Create(cfg.Type, vmCfg, i)
- if err != nil {
- fatalf("failed to create an instance: %v", err)
- }
- instances = append(instances, inst)
- }
- RunManager(cfg, syscalls, instances)
-}
-
-func parseConfig() (*Config, map[int]bool) {
- if *flagConfig == "" {
- fatalf("supply config file name in -config flag")
- }
- data, err := ioutil.ReadFile(*flagConfig)
- if err != nil {
- fatalf("failed to read config file: %v", err)
- }
- cfg := new(Config)
- if err := json.Unmarshal(data, cfg); err != nil {
- fatalf("failed to parse config file: %v", err)
- }
- if cfg.Http == "" {
- fatalf("config param http is empty")
- }
- if cfg.Workdir == "" {
- fatalf("config param workdir is empty")
- }
- if cfg.Vmlinux == "" {
- fatalf("config param vmlinux is empty")
- }
- if cfg.Type == "" {
- fatalf("config param type is empty")
- }
- if cfg.Count <= 0 || cfg.Count > 1000 {
- fatalf("invalid config param count: %v, want (1, 1000]", cfg.Count)
- }
- if cfg.Procs <= 0 {
- cfg.Procs = 1
- }
-
- match := func(call *sys.Call, str string) bool {
- if str == call.CallName || str == call.Name {
- return true
- }
- if len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {
- return true
- }
- return false
- }
-
- var syscalls map[int]bool
- if len(cfg.Enable_Syscalls) != 0 || len(cfg.Disable_Syscalls) != 0 {
- syscalls = make(map[int]bool)
- if len(cfg.Enable_Syscalls) != 0 {
- for _, c := range cfg.Enable_Syscalls {
- n := 0
- for _, call := range sys.Calls {
- if match(call, c) {
- syscalls[call.ID] = true
- n++
- }
- }
- if n == 0 {
- fatalf("unknown enabled syscall: %v", c)
- }
- }
- } else {
- for _, call := range sys.Calls {
- syscalls[call.ID] = true
- }
- }
- for _, c := range cfg.Disable_Syscalls {
- n := 0
- for _, call := range sys.Calls {
- if match(call, c) {
- delete(syscalls, call.ID)
- n++
- }
- }
- if n == 0 {
- fatalf("unknown disabled syscall: %v", c)
- }
- }
- // They will be generated anyway.
- syscalls[sys.CallMap["mmap"].ID] = true
- syscalls[sys.CallMap["clock_gettime"].ID] = true
- }
-
- return cfg, syscalls
-}
-
-func logf(v int, msg string, args ...interface{}) {
- if *flagV >= v {
- log.Printf(msg, args...)
- }
-}
-
-func fatalf(msg string, args ...interface{}) {
- log.Fatalf(msg, args...)
-}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 485563603..8cc4d3745 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -5,29 +5,45 @@ package main
import (
"encoding/hex"
+ "flag"
"fmt"
+ "io/ioutil"
"net"
"net/rpc"
+ "os"
"path/filepath"
+ "regexp"
+ "log"
"sync"
"time"
+ "github.com/google/syzkaller/config"
"github.com/google/syzkaller/cover"
"github.com/google/syzkaller/prog"
. "github.com/google/syzkaller/rpctype"
"github.com/google/syzkaller/sys"
"github.com/google/syzkaller/vm"
+ _ "github.com/google/syzkaller/vm/kvm"
+ _ "github.com/google/syzkaller/vm/qemu"
+)
+
+var (
+ flagConfig = flag.String("config", "", "configuration file")
+ flagV = flag.Int("v", 0, "verbosity")
+ flagDebug = flag.Bool("debug", false, "dump all VM output to console")
)
type Manager struct {
- cfg *Config
+ cfg *config.Config
+ crashdir string
+ port int
persistentCorpus *PersistentSet
- instances []vm.Instance
startTime time.Time
stats map[string]uint64
- mu sync.Mutex
- syscalls map[int]bool
+ mu sync.Mutex
+ enabledSyscalls string
+ suppressions []*regexp.Regexp
candidates [][]byte // untriaged inputs
corpus []RpcInput
@@ -42,15 +58,33 @@ type Fuzzer struct {
input int
}
-func RunManager(cfg *Config, syscalls map[int]bool, instances []vm.Instance) {
+func main() {
+ flag.Parse()
+ cfg, syscalls, suppressions, err := config.Parse(*flagConfig)
+ if err != nil {
+ fatalf("%v", err)
+ }
+ logf(1, "enabled syscalls: %v", syscalls)
+ if *flagDebug {
+ cfg.Debug = true
+ cfg.Count = 1
+ }
+ RunManager(cfg, syscalls, suppressions)
+}
+
+func RunManager(cfg *config.Config, enabledSyscalls string, suppressions []*regexp.Regexp) {
+ crashdir := filepath.Join(cfg.Workdir, "crashes")
+ os.MkdirAll(crashdir, 0700)
+
mgr := &Manager{
- cfg: cfg,
- startTime: time.Now(),
- stats: make(map[string]uint64),
- instances: instances,
- syscalls: syscalls,
- corpusCover: make([]cover.Cover, sys.CallCount),
- fuzzers: make(map[string]*Fuzzer),
+ cfg: cfg,
+ crashdir: crashdir,
+ startTime: time.Now(),
+ stats: make(map[string]uint64),
+ enabledSyscalls: enabledSyscalls,
+ suppressions: suppressions,
+ corpusCover: make([]cover.Cover, sys.CallCount),
+ fuzzers: make(map[string]*Fuzzer),
}
logf(0, "loading corpus...")
@@ -70,24 +104,153 @@ func RunManager(cfg *Config, syscalls map[int]bool, instances []vm.Instance) {
mgr.initHttp()
// Create RPC server for fuzzers.
- rpcAddr := fmt.Sprintf("localhost:%v", cfg.Port)
- ln, err := net.Listen("tcp", rpcAddr)
+ ln, err := net.Listen("tcp", "localhost:0")
if err != nil {
- fatalf("failed to listen on port %v: %v", cfg.Port, err)
+ fatalf("failed to listen on localhost:0: %v", err)
}
- logf(0, "serving rpc on tcp://%v", rpcAddr)
+ logf(0, "serving rpc on tcp://%v", ln.Addr())
+ mgr.port = ln.Addr().(*net.TCPAddr).Port
s := rpc.NewServer()
s.Register(mgr)
go s.Accept(ln)
- for _, inst := range mgr.instances {
- go inst.Run()
+ for i := 0; i < cfg.Count; i++ {
+ go func() {
+ for {
+ vmCfg, err := config.CreateVMConfig(cfg)
+ if err != nil {
+ fatalf("failed to create VM config: %v", err)
+ }
+ if !mgr.runInstance(vmCfg) {
+ time.Sleep(10 * time.Second)
+ }
+ }
+ }()
}
select {}
}
+func (mgr *Manager) runInstance(vmCfg *vm.Config) bool {
+ inst, err := vm.Create(mgr.cfg.Type, vmCfg)
+ if err != nil {
+ logf(0, "failed to create instance: %v", err)
+ return false
+ }
+ defer inst.Close()
+
+ if err := inst.Copy(filepath.Join(mgr.cfg.Syzkaller, "bin/syz-fuzzer"), "/syz-fuzzer"); err != nil {
+ logf(0, "failed to copy binary: %v", err)
+ return false
+ }
+ if err := inst.Copy(filepath.Join(mgr.cfg.Syzkaller, "bin/syz-executor"), "/syz-executor"); err != nil {
+ logf(0, "failed to copy binary: %v", err)
+ return false
+ }
+
+ // TODO: this should be present in the image.
+ _, errc, err := inst.Run(10*time.Second, "echo -n 0 > /proc/sys/debug/exception-trace")
+ if err == nil {
+ <-errc
+ }
+
+ // Run the fuzzer binary.
+ cover := ""
+ if mgr.cfg.NoCover {
+ cover = "-nocover=1"
+ }
+ dropprivs := ""
+ if mgr.cfg.NoDropPrivs {
+ dropprivs = "-dropprivs=0"
+ }
+ calls := ""
+ if mgr.enabledSyscalls != "" {
+ calls = "-calls=" + mgr.enabledSyscalls
+ }
+
+ outputC, errorC, err := inst.Run(time.Hour, fmt.Sprintf("/syz-fuzzer -name %v -executor /syz-executor -manager %v:%v -procs %v -leak=%v %v %v %v",
+ vmCfg.Name, inst.HostAddr(), mgr.port, mgr.cfg.Procs, mgr.cfg.Leak, cover, dropprivs, calls))
+ if err != nil {
+ logf(0, "failed to run fuzzer: %v", err)
+ return false
+ }
+ var output []byte
+ matchPos := 0
+ const (
+ beforeContext = 256 << 10
+ afterContext = 64 << 10
+ )
+ for {
+ select {
+ case err := <-errorC:
+ switch err {
+ case vm.TimeoutErr:
+ logf(0, "%v: running long enough, restarting", vmCfg.Name)
+ return true
+ default:
+ mgr.saveCrasher(vmCfg.Name, "lost connection", output)
+ return true
+ }
+ case out := <-outputC:
+ output = append(output, out...)
+ if loc := vm.CrashRe.FindAllIndex(output[matchPos:], -1); len(loc) != 0 {
+ // Give it some time to finish writing the error message.
+ timer := time.NewTimer(5 * time.Second).C
+ loop:
+ for {
+ select {
+ case out = <-outputC:
+ output = append(output, out...)
+ case <-timer:
+ break loop
+ }
+ }
+ loc = vm.CrashRe.FindAllIndex(output[matchPos:], -1)
+ for i := range loc {
+ loc[i][0] += matchPos
+ loc[i][1] += matchPos
+ }
+ start := loc[0][0] - beforeContext
+ if start < 0 {
+ start = 0
+ }
+ end := loc[len(loc)-1][1] + afterContext
+ if end > len(output) {
+ end = len(output)
+ }
+ mgr.saveCrasher(vmCfg.Name, string(output[loc[0][0]:loc[0][1]]), output[start:end])
+ }
+ if len(output) > 2*beforeContext {
+ copy(output, output[len(output)-beforeContext:])
+ output = output[:beforeContext]
+ }
+ matchPos = len(output) - 128
+ if matchPos < 0 {
+ matchPos = 0
+ }
+ case <-time.NewTicker(time.Minute).C:
+ mgr.saveCrasher(vmCfg.Name, "no output", output)
+ return true
+ }
+ }
+}
+
+func (mgr *Manager) saveCrasher(name, what string, output []byte) {
+ for _, re := range mgr.suppressions {
+ if re.Match(output) {
+ logf(1, "%v: suppressing '%v' with '%v'", name, what, re.String())
+ return
+ }
+ }
+ output = append(output, '\n')
+ output = append(output, what...)
+ output = append(output, '\n')
+ filename := fmt.Sprintf("crash-%v-%v", name, time.Now().UnixNano())
+ logf(0, "%v: saving crash '%v' to %v", name, what, filename)
+ ioutil.WriteFile(filepath.Join(mgr.crashdir, filename), output, 0660)
+}
+
func (mgr *Manager) minimizeCorpus() {
- if !mgr.cfg.Nocover && len(mgr.corpus) != 0 {
+ if !mgr.cfg.NoCover && len(mgr.corpus) != 0 {
// First, sort corpus per call.
type Call struct {
inputs []RpcInput
@@ -148,7 +311,7 @@ func (mgr *Manager) Connect(a *ConnectArgs, r *ConnectRes) error {
}
func (mgr *Manager) NewInput(a *NewInputArgs, r *int) error {
- logf(2, "new input from fuzzer %v", a.Name)
+ logf(2, "new input from %v for syscall %v", a.Name, a.Call)
mgr.mu.Lock()
defer mgr.mu.Unlock()
@@ -190,3 +353,13 @@ func (mgr *Manager) Poll(a *PollArgs, r *PollRes) error {
return nil
}
+
+func logf(v int, msg string, args ...interface{}) {
+ if *flagV >= v {
+ log.Printf(msg, args...)
+ }
+}
+
+func fatalf(msg string, args ...interface{}) {
+ log.Fatalf(msg, args...)
+}
diff --git a/vm/kvm/kvm.go b/vm/kvm/kvm.go
index 2b10f1eaa..b9185d91f 100644
--- a/vm/kvm/kvm.go
+++ b/vm/kvm/kvm.go
@@ -4,417 +4,249 @@
package kvm
import (
- "encoding/json"
"fmt"
- "io"
"io/ioutil"
- "log"
"os"
"os/exec"
"path/filepath"
- "regexp"
+ "runtime"
+ "strconv"
"sync"
"time"
+ "github.com/google/syzkaller/fileutil"
"github.com/google/syzkaller/vm"
)
-const hostAddr = "192.168.33.1"
-const logOutput = false
-
func init() {
vm.Register("kvm", ctor)
}
-type kvm struct {
- params
- workdir string
- crashdir string
- callsFlag string
- id int
- mgrPort int
-}
+type instance struct {
+ cfg *vm.Config
+ sandbox string
+ sandboxPath string
+ lkvm *exec.Cmd
+ readerC chan error
+ waiterC chan error
-type params struct {
- Lkvm string
- Kernel string
- Cmdline string
- Fuzzer string
- Executor string
- Cpu int
- Mem int
+ mu sync.Mutex
+ outputB []byte
+ outputC chan []byte
}
-func ctor(cfg *vm.Config, index int) (vm.Instance, error) {
- p := new(params)
- if err := json.Unmarshal(cfg.Params, p); err != nil {
- return nil, fmt.Errorf("failed to unmarshal kvm params: %v", err)
- }
- if _, err := os.Stat(p.Kernel); err != nil {
- return nil, fmt.Errorf("kernel '%v' does not exist: %v", p.Kernel, err)
- }
- if _, err := os.Stat(p.Fuzzer); err != nil {
- return nil, fmt.Errorf("fuzzer binary '%v' does not exist: %v", p.Fuzzer, err)
+func ctor(cfg *vm.Config) (vm.Instance, error) {
+ sandbox := fmt.Sprintf("syz-%v", cfg.Index)
+ inst := &instance{
+ cfg: cfg,
+ sandbox: sandbox,
+ sandboxPath: filepath.Join(os.Getenv("HOME"), ".lkvm", sandbox),
}
- if _, err := os.Stat(p.Executor); err != nil {
- return nil, fmt.Errorf("executor binary '%v' does not exist: %v", p.Executor, err)
- }
- if p.Lkvm == "" {
- p.Lkvm = "lkvm"
- }
- if p.Cpu <= 0 || p.Cpu > 1024 {
- return nil, fmt.Errorf("bad kvm cpu: %v, want [1-1024]", p.Cpu)
- }
- if p.Mem < 128 || p.Mem > 1048576 {
- return nil, fmt.Errorf("bad kvm mem: %v, want [128-1048576]", p.Mem)
- }
-
- crashdir := filepath.Join(cfg.Workdir, "crashes")
- os.MkdirAll(crashdir, 0770)
-
- workdir := filepath.Join(cfg.Workdir, "kvm")
- os.MkdirAll(workdir, 0770)
+ closeInst := inst
+ defer func() {
+ if closeInst != nil {
+ closeInst.Close()
+ }
+ }()
- vm := &kvm{
- params: *p,
- workdir: workdir,
- crashdir: crashdir,
- id: index,
- mgrPort: cfg.ManagerPort,
+ if err := validateConfig(cfg); err != nil {
+ return nil, err
}
- if cfg.EnabledSyscalls != "" {
- vm.callsFlag = "-calls=" + cfg.EnabledSyscalls
+ os.RemoveAll(inst.sandboxPath)
+ os.Remove(inst.sandboxPath + ".sock")
+ out, err := exec.Command(inst.cfg.Bin, "setup", sandbox).CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("failed to lkvm setup: %v\n%s", err, out)
}
-
- return vm, nil
-}
-
-func (vm *kvm) Run() {
- log.Printf("kvm/%v: started\n", vm.id)
- sandbox := fmt.Sprintf("syz-%v", vm.id)
- sandboxPath := filepath.Join(os.Getenv("HOME"), ".lkvm", sandbox)
- scriptPath := filepath.Join(vm.workdir, sandbox+".sh")
- script := fmt.Sprintf("#! /bin/bash\n/syzkaller_fuzzer -name kvm/%v -executor /syzkaller_executor -manager %v:%v %v\n",
- vm.id, hostAddr, vm.mgrPort, vm.callsFlag)
- if err := ioutil.WriteFile(scriptPath, []byte(script), 0770); err != nil {
- log.Fatalf("failed to create run script: %v", err)
+ scriptPath := filepath.Join(cfg.Workdir, "script.sh")
+ if err := ioutil.WriteFile(scriptPath, []byte(script), 0700); err != nil {
+ return nil, fmt.Errorf("failed to create temp file: %v", err)
}
- for run := 0; ; run++ {
- logname := filepath.Join(vm.workdir, fmt.Sprintf("log%v-%v-%v", vm.id, run, time.Now().Unix()))
- var logf *os.File
- if logOutput {
- var err error
- logf, err = os.Create(logname)
- if err != nil {
- log.Printf("failed to create log file: %v", err)
- time.Sleep(10 * time.Second)
- continue
- }
- }
- rpipe, wpipe, err := os.Pipe()
- if err != nil {
- log.Printf("failed to create pipe: %v", err)
- if logf != nil {
- logf.Close()
- }
- time.Sleep(10 * time.Second)
- continue
- }
- os.RemoveAll(sandboxPath)
- os.Remove(sandboxPath + ".sock")
- out, err := exec.Command(vm.Lkvm, "setup", sandbox).CombinedOutput()
- if err != nil {
- log.Printf("failed to lkvm setup: %v\n%s", err, out)
- if logf != nil {
- logf.Close()
- }
- rpipe.Close()
- wpipe.Close()
- time.Sleep(10 * time.Second)
- continue
- }
- if err := copyFile(vm.Fuzzer, filepath.Join(sandboxPath, "/syzkaller_fuzzer")); err != nil {
- log.Printf("failed to copy file into sandbox: %v", err)
- os.RemoveAll(sandboxPath)
- if logf != nil {
- logf.Close()
- }
- rpipe.Close()
- wpipe.Close()
- time.Sleep(10 * time.Second)
- continue
- }
- if err := copyFile(vm.Executor, filepath.Join(sandboxPath, "/syzkaller_executor")); err != nil {
- log.Printf("failed to copy file into sandbox: %v", err)
- os.RemoveAll(sandboxPath)
- if logf != nil {
- logf.Close()
- }
- rpipe.Close()
- wpipe.Close()
- time.Sleep(10 * time.Second)
- continue
- }
- os.Chmod(filepath.Join(sandboxPath, "/syzkaller_fuzzer"), 0770)
- os.Chmod(filepath.Join(sandboxPath, "/syzkaller_executor"), 0770)
- inst := &Instance{
- id: vm.id,
- crashdir: vm.crashdir,
- params: vm.params,
- name: fmt.Sprintf("kvm/%v-%v", vm.id, run),
- sandbox: sandbox,
- sandboxPath: sandboxPath,
- scriptPath: scriptPath,
- callsFlag: vm.callsFlag,
- log: logf,
- rpipe: rpipe,
- wpipe: wpipe,
- cmds: make(map[*Command]bool),
- }
- inst.Run()
- inst.Shutdown()
- time.Sleep(10 * time.Second)
- }
-}
-type Instance struct {
- params
- sync.Mutex
- id int
- crashdir string
- name string
- sandbox string
- sandboxPath string
- scriptPath string
- callsFlag string
- log *os.File
- rpipe *os.File
- wpipe *os.File
- cmds map[*Command]bool
- kvm *Command
-}
+ rpipe, wpipe, err := os.Pipe()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create pipe: %v", err)
+ }
-type Command struct {
- sync.Mutex
- cmd *exec.Cmd
- done chan struct{}
- failed bool
- out []byte
- outpos int
-}
+ inst.lkvm = exec.Command("taskset", "-c", strconv.Itoa(inst.cfg.Index%runtime.NumCPU()),
+ inst.cfg.Bin, "sandbox",
+ "--disk", inst.sandbox,
+ "--kernel", inst.cfg.Kernel,
+ "--params", "slub_debug=UZ "+inst.cfg.Cmdline,
+ "--mem", strconv.Itoa(inst.cfg.Mem),
+ "--cpus", strconv.Itoa(inst.cfg.Cpu),
+ "--network", "mode=user",
+ "--sandbox", scriptPath,
+ )
+ inst.lkvm.Stdout = wpipe
+ inst.lkvm.Stderr = wpipe
+ if err := inst.lkvm.Start(); err != nil {
+ rpipe.Close()
+ wpipe.Close()
+ return nil, fmt.Errorf("failed to start lkvm: %v", err)
+ }
-func (inst *Instance) Run() {
- var outputMu sync.Mutex
- var output []byte
+ // Start output reading goroutine.
+ inst.readerC = make(chan error)
go func() {
var buf [64 << 10]byte
for {
- n, err := inst.rpipe.Read(buf[:])
+ n, err := rpipe.Read(buf[:])
if n != 0 {
- outputMu.Lock()
- output = append(output, buf[:n]...)
- outputMu.Unlock()
- if inst.log != nil {
- inst.log.Write(buf[:n])
+ if inst.cfg.Debug {
+ os.Stdout.Write(buf[:n])
+ os.Stdout.Write([]byte{'\n'})
+ }
+ inst.mu.Lock()
+ inst.outputB = append(inst.outputB, buf[:n]...)
+ if inst.outputC != nil {
+ select {
+ case inst.outputC <- inst.outputB:
+ inst.outputB = nil
+ default:
+ }
}
+ inst.mu.Unlock()
+ time.Sleep(time.Second)
}
if err != nil {
- break
+ rpipe.Close()
+ inst.readerC <- err
+ return
}
}
}()
- // Start the instance.
- inst.kvm = inst.CreateCommand(
- "taskset", "1",
- inst.Lkvm, "sandbox",
- "--disk", inst.sandbox,
- fmt.Sprintf("--mem=%v", inst.Mem),
- fmt.Sprintf("--cpus=%v", inst.Cpu),
- "--kernel", inst.Kernel,
- "--network", "mode=user",
- "--sandbox", inst.scriptPath,
- )
-
- start := time.Now()
- deadline := start.Add(time.Hour)
- lastOutput := time.Now()
- lastOutputLen := 0
- matchPos := 0
- crashRe := regexp.MustCompile("\\[ cut here \\]|Kernel panic| BUG: | WARNING: | INFO: |unable to handle kernel NULL pointer dereference|general protection fault|UBSAN:")
- const contextSize = 64 << 10
- for range time.NewTicker(5 * time.Second).C {
- outputMu.Lock()
- if lastOutputLen != len(output) {
- lastOutput = time.Now()
- }
- if loc := crashRe.FindAllIndex(output[matchPos:], -1); len(loc) != 0 {
- // Give it some time to finish writing the error message.
- outputMu.Unlock()
- time.Sleep(5 * time.Second)
- outputMu.Lock()
- loc = crashRe.FindAllIndex(output[matchPos:], -1)
- for i := range loc {
- loc[i][0] += matchPos
- loc[i][1] += matchPos
- }
- start := loc[0][0] - contextSize
- if start < 0 {
- start = 0
- }
- end := loc[len(loc)-1][1] + contextSize
- if end > len(output) {
- end = len(output)
- }
- inst.SaveCrasher(output[start:end])
- }
- if len(output) > 2*contextSize {
- copy(output, output[len(output)-contextSize:])
- output = output[:contextSize]
- }
- matchPos = len(output) - 128
- if matchPos < 0 {
- matchPos = 0
- }
- lastOutputLen = len(output)
- outputMu.Unlock()
+ // Wait for the lkvm asynchronously.
+ inst.waiterC = make(chan error, 1)
+ go func() {
+ err := inst.lkvm.Wait()
+ wpipe.Close()
+ inst.waiterC <- err
+ }()
- if time.Since(lastOutput) > 3*time.Minute {
- time.Sleep(time.Second)
- outputMu.Lock()
- output = append(output, "\nno output from fuzzer, restarting\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("no output from fuzzer, restarting")
- inst.kvm.cmd.Process.Kill()
- inst.kvm.cmd.Process.Kill()
- return
- }
- if inst.kvm.Exited() {
- time.Sleep(time.Second)
- outputMu.Lock()
- output = append(output, "\nfuzzer binary stopped or lost connection\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("fuzzer binary stopped or lost connection")
- return
- }
- if time.Now().After(deadline) {
- inst.Logf("running for long enough, restarting")
- inst.kvm.cmd.Process.Kill()
- inst.kvm.cmd.Process.Kill()
- return
- }
+ // Wait for the script to start serving.
+ _, errc, err := inst.Run(10*time.Minute, "mount -t debugfs none /sys/kernel/debug/")
+ if err == nil {
+ err = <-errc
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to run script: %v", err)
}
-}
-func (inst *Instance) SaveCrasher(output []byte) {
- ioutil.WriteFile(filepath.Join(inst.crashdir, fmt.Sprintf("crash%v-%v", inst.id, time.Now().UnixNano())), output, 0660)
+ closeInst = nil
+ return inst, nil
}
-func (inst *Instance) Shutdown() {
- defer func() {
- os.RemoveAll(inst.sandboxPath)
- inst.rpipe.Close()
- inst.wpipe.Close()
- if inst.log != nil {
- inst.log.Close()
- }
- }()
- if inst.kvm.cmd == nil {
- // CreateCommand should have been failed very early.
- return
+func validateConfig(cfg *vm.Config) error {
+ if cfg.Bin == "" {
+ cfg.Bin = "lkvm"
}
- for try := 0; try < 10; try++ {
- inst.kvm.cmd.Process.Kill()
- time.Sleep(time.Second)
- inst.Lock()
- n := len(inst.cmds)
- inst.Unlock()
- if n == 0 {
- return
- }
+ if cfg.Image != "" {
+ return fmt.Errorf("lkvm does not support custom images")
}
- inst.Logf("hanged processes after kill")
- inst.Lock()
- for cmd := range inst.cmds {
- cmd.cmd.Process.Kill()
- cmd.cmd.Process.Kill()
+ if cfg.Sshkey != "" {
+ return fmt.Errorf("lkvm does not need ssh key")
}
- inst.Unlock()
- time.Sleep(3 * time.Second)
-}
-
-func (inst *Instance) CreateCommand(args ...string) *Command {
- if inst.log != nil {
- fmt.Fprintf(inst.log, "executing command: %v\n", args)
+ if _, err := os.Stat(cfg.Kernel); err != nil {
+ return fmt.Errorf("kernel file '%v' does not exist: %v", cfg.Kernel, err)
}
- cmd := &Command{}
- cmd.done = make(chan struct{})
- cmd.cmd = exec.Command(args[0], args[1:]...)
- cmd.cmd.Stdout = inst.wpipe
- cmd.cmd.Stderr = inst.wpipe
- if err := cmd.cmd.Start(); err != nil {
- inst.Logf("failed to start command '%v': %v\n", args, err)
- cmd.failed = true
- close(cmd.done)
- return cmd
+ if cfg.Cpu <= 0 || cfg.Cpu > 1024 {
+ return fmt.Errorf("bad qemu cpu: %v, want [1-1024]", cfg.Cpu)
}
- inst.Lock()
- inst.cmds[cmd] = true
- inst.Unlock()
- go func() {
- err := cmd.cmd.Wait()
- inst.Lock()
- delete(inst.cmds, cmd)
- inst.Unlock()
- if inst.log != nil {
- fmt.Fprintf(inst.log, "command '%v' exited: %v\n", args, err)
- }
- cmd.failed = err != nil
- close(cmd.done)
- }()
- return cmd
+ if cfg.Mem < 128 || cfg.Mem > 1048576 {
+ return fmt.Errorf("bad qemu mem: %v, want [128-1048576]", cfg.Mem)
+ }
+ return nil
}
-func (inst *Instance) Logf(str string, args ...interface{}) {
- fmt.Fprintf(inst.wpipe, str+"\n", args...)
- log.Printf("%v: "+str, append([]interface{}{inst.name}, args...)...)
+func (inst *instance) HostAddr() string {
+ return "192.168.33.1"
}
-func (cmd *Command) Wait(max time.Duration) bool {
- select {
- case <-cmd.done:
- return !cmd.failed
- case <-time.After(max):
- return false
+func (inst *instance) Close() {
+ if inst.lkvm != nil {
+ inst.lkvm.Process.Kill()
+ err := <-inst.waiterC
+ inst.waiterC <- err // repost it for waiting goroutines
+ <-inst.readerC
}
+ os.RemoveAll(inst.cfg.Workdir)
+ os.RemoveAll(inst.sandboxPath)
+ os.Remove(inst.sandboxPath + ".sock")
}
-func (cmd *Command) Exited() bool {
- select {
- case <-cmd.done:
- return true
- default:
- return false
+func (inst *instance) Copy(hostSrc, vmDst string) error {
+ dst := filepath.Join(inst.sandboxPath, vmDst)
+ if err := fileutil.CopyFile(hostSrc, dst, false); err != nil {
+ return err
}
+ return os.Chmod(dst, 0777)
}
-func copyFile(oldfn, newfn string) error {
- oldf, err := os.Open(oldfn)
- if err != nil {
- return err
+func (inst *instance) Run(timeout time.Duration, command string) (<-chan []byte, <-chan error, error) {
+ outputC := make(chan []byte, 10)
+ errorC := make(chan error, 1)
+ inst.mu.Lock()
+ inst.outputB = nil
+ inst.outputC = outputC
+ inst.mu.Unlock()
+
+ cmdFile := filepath.Join(inst.sandboxPath, "/syz-cmd")
+ tmpFile := cmdFile + "-tmp"
+ if err := ioutil.WriteFile(tmpFile, []byte(command), 0700); err != nil {
+ return nil, nil, err
}
- defer oldf.Close()
- newf, err := os.Create(newfn)
- if err != nil {
- return err
+ if err := os.Rename(tmpFile, cmdFile); err != nil {
+ return nil, nil, err
}
- defer newf.Close()
- _, err = io.Copy(newf, oldf)
- if err != nil {
- return err
+
+ signal := func(err error) {
+ time.Sleep(3 * time.Second) // wait for any pending output
+ inst.mu.Lock()
+ inst.outputB = nil
+ inst.outputC = nil
+ inst.mu.Unlock()
+ errorC <- err
}
- return nil
+
+ go func() {
+ timeoutTicker := time.NewTicker(timeout)
+ secondTicker := time.NewTicker(time.Second)
+ var resultErr error
+ loop:
+ for {
+ select {
+ case <-timeoutTicker.C:
+ resultErr = vm.TimeoutErr
+ break loop
+ case <-secondTicker.C:
+ if _, err := os.Stat(cmdFile); err != nil {
+ resultErr = nil
+ break loop
+ }
+ case err := <-inst.waiterC:
+ inst.waiterC <- err // repost it for Close
+ resultErr = fmt.Errorf("lkvm exited")
+ break loop
+ }
+ }
+ signal(resultErr)
+ timeoutTicker.Stop()
+ secondTicker.Stop()
+ }()
+
+ return outputC, errorC, nil
}
+
+const script = `#! /bin/bash
+while true; do
+ if [ -e "/syz-cmd" ]; then
+ /syz-cmd
+ rm -f /syz-cmd
+ else
+ sleep 1
+ fi
+done
+`
diff --git a/vm/local/local.go b/vm/local/local.go
deleted file mode 100644
index 7af7b2c70..000000000
--- a/vm/local/local.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2015 syzkaller project authors. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package qemu
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "os"
- "os/exec"
- "syscall"
- "time"
-
- "github.com/google/syzkaller/vm"
-)
-
-func init() {
- vm.Register("local", ctor)
-}
-
-type local struct {
- params
- workdir string
- syscalls string
- id int
- mgrPort int
- nocover bool
-}
-
-type params struct {
- Fuzzer string
- Executor string
-}
-
-func ctor(cfg *vm.Config, index int) (vm.Instance, error) {
- p := new(params)
- if err := json.Unmarshal(cfg.Params, p); err != nil {
- return nil, fmt.Errorf("failed to unmarshal local params: %v", err)
- }
- if _, err := os.Stat(p.Fuzzer); err != nil {
- return nil, fmt.Errorf("fuzzer binary '%v' does not exist: %v", p.Fuzzer, err)
- }
- if _, err := os.Stat(p.Executor); err != nil {
- return nil, fmt.Errorf("executor binary '%v' does not exist: %v", p.Executor, err)
- }
-
- os.MkdirAll(cfg.Workdir, 0770)
-
- // Disable annoying segfault dmesg messages, fuzzer is going to crash a lot.
- etrace, err := os.Open("/proc/sys/debug/exception-trace")
- if err == nil {
- etrace.Write([]byte{'0'})
- etrace.Close()
- }
-
- // Don't write executor core files.
- syscall.Setrlimit(syscall.RLIMIT_CORE, &syscall.Rlimit{0, 0})
-
- loc := &local{
- params: *p,
- workdir: cfg.Workdir,
- syscalls: cfg.EnabledSyscalls,
- nocover: cfg.NoCover,
- id: index,
- mgrPort: cfg.ManagerPort,
- }
- return loc, nil
-}
-
-func (loc *local) Run() {
- name := fmt.Sprintf("local-%v", loc.id)
- log.Printf("%v: started\n", name)
- for run := 0; ; run++ {
- cmd := exec.Command(loc.Fuzzer, "-name", name, "-saveprog", "-executor", loc.Executor,
- "-manager", fmt.Sprintf("localhost:%v", loc.mgrPort), "-dropprivs=0")
- if loc.syscalls != "" {
- cmd.Args = append(cmd.Args, "-calls="+loc.syscalls)
- }
- if loc.nocover {
- cmd.Args = append(cmd.Args, "-nocover")
- }
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- cmd.Dir = loc.workdir
- cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
- if err := cmd.Start(); err != nil {
- log.Printf("failed to start fuzzer binary: %v", err)
- time.Sleep(10 * time.Second)
- continue
- }
- pid := cmd.Process.Pid
- done := make(chan bool)
- go func() {
- select {
- case <-done:
- case <-time.After(time.Hour):
- log.Printf("%v: running for long enough, restarting", name)
- syscall.Kill(-pid, syscall.SIGKILL)
- syscall.Kill(-pid, syscall.SIGKILL)
- syscall.Kill(pid, syscall.SIGKILL)
- syscall.Kill(pid, syscall.SIGKILL)
- }
- }()
- err := cmd.Wait()
- close(done)
- log.Printf("fuzzer binary exited: %v", err)
- time.Sleep(10 * time.Second)
- }
-}
diff --git a/vm/qemu/qemu.go b/vm/qemu/qemu.go
index bb560accb..6116bcacc 100644
--- a/vm/qemu/qemu.go
+++ b/vm/qemu/qemu.go
@@ -4,496 +4,310 @@
package qemu
import (
- "encoding/json"
"fmt"
- "io"
- "io/ioutil"
- "log"
"net"
"os"
"os/exec"
"path/filepath"
- "regexp"
"strconv"
+ "strings"
"sync"
"time"
+ "github.com/google/syzkaller/fileutil"
"github.com/google/syzkaller/vm"
)
-const hostAddr = "10.0.2.10"
-const logOutput = false
-
func init() {
vm.Register("qemu", ctor)
}
-type qemu struct {
- params
- workdir string
- crashdir string
- callsFlag string
- id int
- cfg *vm.Config
+type instance struct {
+ cfg *vm.Config
+ port int
+ image string
+ rpipe *os.File
+ wpipe *os.File
+ qemu *exec.Cmd
+ readerC chan error
+ waiterC chan error
+
+ mu sync.Mutex
+ outputB []byte
+ outputC chan []byte
}
-type params struct {
- Qemu string
- Kernel string
- Cmdline string
- Image string
- Sshkey string
- Fuzzer string
- Executor string
- Port int
- Cpu int
- Mem int
-}
+func ctor(cfg *vm.Config) (vm.Instance, error) {
+ inst := &instance{
+ cfg: cfg,
+ image: filepath.Join(cfg.Workdir, "image"),
+ }
+ closeInst := inst
+ defer func() {
+ if closeInst != nil {
+ closeInst.Close()
+ }
+ }()
-func ctor(cfg *vm.Config, index int) (vm.Instance, error) {
- p := new(params)
- if err := json.Unmarshal(cfg.Params, p); err != nil {
- return nil, fmt.Errorf("failed to unmarshal qemu params: %v", err)
+ if err := validateConfig(cfg); err != nil {
+ return nil, err
}
- if _, err := os.Stat(p.Image); err != nil {
- return nil, fmt.Errorf("image file '%v' does not exist: %v", p.Image, err)
+
+ os.Remove(inst.image)
+ if err := fileutil.CopyFile(inst.cfg.Image, inst.image, true); err != nil {
+ return nil, fmt.Errorf("failed to copy image file: %v", err)
}
- if _, err := os.Stat(p.Sshkey); err != nil {
- return nil, fmt.Errorf("ssh key '%v' does not exist: %v", p.Sshkey, err)
+ var err error
+ inst.rpipe, inst.wpipe, err = os.Pipe()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create pipe: %v", err)
}
- if _, err := os.Stat(p.Fuzzer); err != nil {
- return nil, fmt.Errorf("fuzzer binary '%v' does not exist: %v", p.Fuzzer, err)
+
+ for i := 0; ; i++ {
+ err := inst.Boot()
+ if err == nil {
+ break
+ }
+ if i < 1000 && strings.Contains(err.Error(), "could not set up host forwarding rule") {
+ continue
+ }
+ return nil, err
}
- if _, err := os.Stat(p.Executor); err != nil {
- return nil, fmt.Errorf("executor binary '%v' does not exist: %v", p.Executor, err)
+ closeInst = nil
+ return inst, nil
+}
+
+func validateConfig(cfg *vm.Config) error {
+ if cfg.Bin == "" {
+ cfg.Bin = "qemu-system-x86_64"
}
- if p.Qemu == "" {
- p.Qemu = "qemu-system-x86_64"
+ if _, err := os.Stat(cfg.Kernel); err != nil {
+ return fmt.Errorf("kernel file '%v' does not exist: %v", cfg.Kernel, err)
}
- if p.Port <= 1024 || p.Port >= 64<<10 {
- return nil, fmt.Errorf("bad qemu port: %v, want (1024-65536)", p.Port)
+ if _, err := os.Stat(cfg.Image); err != nil {
+ return fmt.Errorf("image file '%v' does not exist: %v", cfg.Image, err)
}
- p.Port += index
- if p.Cpu <= 0 || p.Cpu > 1024 {
- return nil, fmt.Errorf("bad qemu cpu: %v, want [1-1024]", p.Cpu)
+ if _, err := os.Stat(cfg.Sshkey); err != nil {
+ return fmt.Errorf("ssh key '%v' does not exist: %v", cfg.Sshkey, err)
}
- if p.Mem < 128 || p.Mem > 1048576 {
- return nil, fmt.Errorf("bad qemu mem: %v, want [128-1048576]", p.Mem)
+ if cfg.Cpu <= 0 || cfg.Cpu > 1024 {
+ return fmt.Errorf("bad qemu cpu: %v, want [1-1024]", cfg.Cpu)
}
-
- crashdir := filepath.Join(cfg.Workdir, "crashes")
- os.MkdirAll(crashdir, 0770)
-
- workdir := filepath.Join(cfg.Workdir, "qemu")
- os.MkdirAll(workdir, 0770)
-
- q := &qemu{
- params: *p,
- workdir: workdir,
- crashdir: crashdir,
- id: index,
- cfg: cfg,
- }
-
- if cfg.EnabledSyscalls != "" {
- q.callsFlag = "-calls=" + cfg.EnabledSyscalls
- }
-
- return q, nil
-}
-
-func (q *qemu) Run() {
- log.Printf("qemu/%v: started\n", q.id)
- imagename := filepath.Join(q.workdir, fmt.Sprintf("image%v", q.id))
- for run := 0; ; run++ {
- logname := filepath.Join(q.workdir, fmt.Sprintf("log%v-%v-%v", q.id, run, time.Now().Unix()))
- var logf *os.File
- if logOutput {
- var err error
- logf, err = os.Create(logname)
- if err != nil {
- log.Printf("failed to create log file: %v", err)
- time.Sleep(10 * time.Second)
- continue
- }
- }
- rpipe, wpipe, err := os.Pipe()
- if err != nil {
- log.Printf("failed to create pipe: %v", err)
- if logf != nil {
- logf.Close()
- }
- time.Sleep(10 * time.Second)
- continue
- }
- os.Remove(imagename)
- if err := copyFile(q.Image, imagename); err != nil {
- log.Printf("failed to copy image file: %v", err)
- if logf != nil {
- logf.Close()
- }
- rpipe.Close()
- wpipe.Close()
- time.Sleep(10 * time.Second)
- continue
- }
- inst := &Instance{
- id: q.id,
- crashdir: q.crashdir,
- params: q.params,
- name: fmt.Sprintf("qemu/%v-%v", q.id, run),
- image: imagename,
- callsFlag: q.callsFlag,
- log: logf,
- rpipe: rpipe,
- wpipe: wpipe,
- cfg: q.cfg,
- cmds: make(map[*Command]bool),
- }
- inst.Run()
- inst.Shutdown()
- time.Sleep(10 * time.Second)
+ if cfg.Mem < 128 || cfg.Mem > 1048576 {
+ return fmt.Errorf("bad qemu mem: %v, want [128-1048576]", cfg.Mem)
}
+ return nil
}
-type Instance struct {
- params
- sync.Mutex
- id int
- crashdir string
- name string
- image string
- callsFlag string
- log *os.File
- rpipe *os.File
- wpipe *os.File
- cfg *vm.Config
- cmds map[*Command]bool
- qemu *Command
+func (inst *instance) HostAddr() string {
+ return "10.0.2.10"
}
-type Command struct {
- sync.Mutex
- cmd *exec.Cmd
- done chan struct{}
- failed bool
- out []byte
- outpos int
+func (inst *instance) Close() {
+ if inst.qemu != nil {
+ inst.qemu.Process.Kill()
+ err := <-inst.waiterC
+ inst.waiterC <- err // repost it for waiting goroutines
+ <-inst.readerC
+ }
+ if inst.rpipe != nil {
+ inst.rpipe.Close()
+ }
+ if inst.wpipe != nil {
+ inst.wpipe.Close()
+ }
+ os.RemoveAll(inst.cfg.Workdir)
}
-func (inst *Instance) Run() {
- var outputMu sync.Mutex
- var output []byte
- go func() {
- var buf [64 << 10]byte
- for {
- n, err := inst.rpipe.Read(buf[:])
- if n != 0 {
- outputMu.Lock()
- output = append(output, buf[:n]...)
- outputMu.Unlock()
- if inst.log != nil {
- inst.log.Write(buf[:n])
- }
- }
- if err != nil {
- break
- }
+func (inst *instance) Boot() error {
+ for {
+ // Find an unused TCP port.
+ inst.port = int(time.Now().UnixNano()%(64<<10-1<<10) + 1<<10)
+ ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%v", inst.port))
+ if err == nil {
+ ln.Close()
+ break
}
- }()
-
- // Start the instance.
- // TODO: ignores inst.Cpu
+ }
+ // TODO: ignores inst.cfg.Cpu
args := []string{
- inst.Qemu,
"-hda", inst.image,
- "-m", strconv.Itoa(inst.Mem),
+ "-m", strconv.Itoa(inst.cfg.Mem),
"-net", "nic",
- "-net", fmt.Sprintf("user,host=%v,hostfwd=tcp::%v-:22", hostAddr, inst.Port),
+ "-net", fmt.Sprintf("user,host=%v,hostfwd=tcp::%v-:22", inst.HostAddr(), inst.port),
"-nographic",
"-enable-kvm",
"-numa", "node,nodeid=0,cpus=0-1", "-numa", "node,nodeid=1,cpus=2-3",
"-smp", "sockets=2,cores=2,threads=1",
"-usb", "-usbdevice", "mouse", "-usbdevice", "tablet",
}
- if inst.Kernel != "" {
+ if inst.cfg.Kernel != "" {
args = append(args,
- "-kernel", inst.Kernel,
- "-append", "console=ttyS0 root=/dev/sda debug earlyprintk=serial slub_debug=UZ "+inst.Cmdline,
+ "-kernel", inst.cfg.Kernel,
+ "-append", "console=ttyS0 root=/dev/sda debug earlyprintk=serial slub_debug=UZ "+inst.cfg.Cmdline,
)
}
- inst.qemu = inst.CreateCommand(args...)
- // Wait for ssh server.
+ qemu := exec.Command(inst.cfg.Bin, args...)
+ qemu.Stdout = inst.wpipe
+ qemu.Stderr = inst.wpipe
+ if err := qemu.Start(); err != nil {
+ return fmt.Errorf("failed to start %v %+v: %v", inst.cfg.Bin, args, err)
+ }
+ inst.qemu = qemu
+ // Qemu has started.
+
+ // Start output reading goroutine.
+ inst.readerC = make(chan error)
+ go func(rpipe *os.File) {
+ var buf [64 << 10]byte
+ for {
+ n, err := rpipe.Read(buf[:])
+ if n != 0 {
+ if inst.cfg.Debug {
+ os.Stdout.Write(buf[:n])
+ os.Stdout.Write([]byte{'\n'})
+ }
+ inst.mu.Lock()
+ inst.outputB = append(inst.outputB, buf[:n]...)
+ if inst.outputC != nil {
+ select {
+ case inst.outputC <- inst.outputB:
+ inst.outputB = nil
+ default:
+ }
+ }
+ inst.mu.Unlock()
+ time.Sleep(time.Second)
+ }
+ if err != nil {
+ rpipe.Close()
+ inst.readerC <- err
+ return
+ }
+ }
+ }(inst.rpipe)
+ inst.rpipe = nil
+
+ // Wait for the qemu asynchronously.
+ inst.waiterC = make(chan error, 1)
+ go func() {
+ err := qemu.Wait()
+ inst.wpipe.Close()
+ inst.waiterC <- err
+ }()
+
+ // Wait for ssh server to come up.
time.Sleep(10 * time.Second)
start := time.Now()
for {
- c, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%v", inst.Port), 3*time.Second)
+ c, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%v", inst.port), 3*time.Second)
if err == nil {
c.SetDeadline(time.Now().Add(3 * time.Second))
var tmp [1]byte
n, err := c.Read(tmp[:])
c.Close()
if err == nil && n > 0 {
- // ssh is up and responding.
- break
+ break // ssh is up and responding
}
- c.Close()
time.Sleep(3 * time.Second)
}
- if inst.qemu.Exited() {
- output = append(output, "qemu stopped\n"...)
- inst.SaveCrasher(output)
- inst.Logf("qemu stopped")
- return
+ select {
+ case err := <-inst.waiterC:
+ inst.waiterC <- err // repost it for Close
+ time.Sleep(time.Second) // wait for any pending output
+ inst.mu.Lock()
+ output := inst.outputB
+ inst.mu.Unlock()
+ return fmt.Errorf("qemu stopped:\n%v\n", string(output))
+ default:
}
if time.Since(start) > 10*time.Minute {
- outputMu.Lock()
- output = append(output, "ssh server did not start\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("ssh server did not start")
- return
- }
- }
- inst.Logf("started vm")
-
- // Copy the binaries into the instance.
- if !inst.CreateSCPCommand(inst.Fuzzer, "/syz-fuzzer").Wait(1*time.Minute) ||
- !inst.CreateSCPCommand(inst.Executor, "/syz-executor").Wait(1*time.Minute) {
- outputMu.Lock()
- output = append(output, "\nfailed to scp binaries into the instance\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("failed to scp binaries into the instance")
- return
- }
-
- // Disable annoying segfault dmesg messages, fuzzer is going to crash a lot.
- inst.CreateSSHCommand("echo -n 0 > /proc/sys/debug/exception-trace").Wait(10 * time.Second)
-
- // Run the binary.
- cover := ""
- if inst.cfg.NoCover {
- cover = "-nocover=1"
- }
- dropprivs := ""
- if inst.cfg.NoDropPrivs {
- dropprivs = "-dropprivs=0"
- }
- cmd := inst.CreateSSHCommand(fmt.Sprintf("/syz-fuzzer -name %v -executor /syz-executor -manager %v:%v -procs %v -leak=%v %v %v %v",
- inst.name, hostAddr, inst.cfg.ManagerPort, inst.cfg.Procs, inst.cfg.Leak, cover, dropprivs, inst.callsFlag))
-
- deadline := start.Add(time.Hour)
- lastOutput := time.Now()
- lastOutputLen := 0
- matchPos := 0
- crashRe := regexp.MustCompile("\\[ cut here \\]|Kernel panic| BUG: | WARNING: | INFO: |unable to handle kernel NULL pointer dereference|general protection fault|UBSAN:")
- const (
- beforeContext = 256 << 10
- afterContext = 64 << 10
- )
- for range time.NewTicker(5 * time.Second).C {
- outputMu.Lock()
- if lastOutputLen != len(output) {
- lastOutput = time.Now()
- }
- if loc := crashRe.FindAllIndex(output[matchPos:], -1); len(loc) != 0 {
- // Give it some time to finish writing the error message.
- outputMu.Unlock()
- time.Sleep(5 * time.Second)
- outputMu.Lock()
- loc = crashRe.FindAllIndex(output[matchPos:], -1)
- for i := range loc {
- loc[i][0] += matchPos
- loc[i][1] += matchPos
- }
- start := loc[0][0] - beforeContext
- if start < 0 {
- start = 0
- }
- end := loc[len(loc)-1][1] + afterContext
- if end > len(output) {
- end = len(output)
- }
- inst.SaveCrasher(output[start:end])
- }
- if len(output) > 2*beforeContext {
- copy(output, output[len(output)-beforeContext:])
- output = output[:beforeContext]
- }
- matchPos = len(output) - 128
- if matchPos < 0 {
- matchPos = 0
- }
- lastOutputLen = len(output)
- outputMu.Unlock()
-
- if time.Since(lastOutput) > 3*time.Minute {
- time.Sleep(time.Second)
- outputMu.Lock()
- output = append(output, "\nno output from fuzzer, restarting\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("no output from fuzzer, restarting")
- cmd.cmd.Process.Kill()
- cmd.cmd.Process.Kill()
- return
- }
- if cmd.Exited() {
- time.Sleep(time.Second)
- outputMu.Lock()
- output = append(output, "\nfuzzer binary stopped or lost connection\n"...)
- inst.SaveCrasher(output)
- outputMu.Unlock()
- inst.Logf("fuzzer binary stopped or lost connection")
- return
- }
- if time.Now().After(deadline) {
- inst.Logf("running for long enough, restarting")
- cmd.cmd.Process.Kill()
- cmd.cmd.Process.Kill()
- return
+ inst.mu.Lock()
+ output := inst.outputB
+ inst.mu.Unlock()
+ return fmt.Errorf("ssh server did not start:\n%v\n", string(output))
}
}
+ // Drop boot output. It is not interesting if the VM has successfully booted.
+ inst.mu.Lock()
+ inst.outputB = nil
+ inst.mu.Unlock()
+ return nil
}
-func (inst *Instance) SaveCrasher(output []byte) {
- for _, re := range inst.cfg.Suppressions {
- if re.Match(output) {
- log.Printf("qemu/%v: suppressing '%v'", inst.id, re.String())
- return
- }
+func (inst *instance) Copy(hostSrc, vmDst string) error {
+ args := append(inst.sshArgs("-P"), hostSrc, "root@localhost:"+vmDst)
+ cmd := exec.Command("scp", args...)
+ if err := cmd.Start(); err != nil {
+ return err
}
- filename := fmt.Sprintf("crash%v-%v", inst.id, time.Now().UnixNano())
- log.Printf("qemu/%v: saving crash to %v", inst.id, filename)
- ioutil.WriteFile(filepath.Join(inst.crashdir, filename), output, 0660)
-}
-
-func (inst *Instance) Shutdown() {
- defer func() {
- os.Remove(inst.image)
- inst.rpipe.Close()
- inst.wpipe.Close()
- if inst.log != nil {
- inst.log.Close()
+ done := make(chan bool)
+ go func() {
+ select {
+ case <-time.After(time.Minute):
+ cmd.Process.Kill()
+ case <-done:
}
}()
- if inst.qemu.cmd == nil {
- // CreateCommand should have been failed very early.
- return
- }
- for try := 0; try < 10; try++ {
- inst.qemu.cmd.Process.Kill()
- time.Sleep(time.Second)
- inst.Lock()
- n := len(inst.cmds)
- inst.Unlock()
- if n == 0 {
- return
- }
- }
- inst.Logf("hanged processes after kill")
- inst.Lock()
- for cmd := range inst.cmds {
- cmd.cmd.Process.Kill()
- cmd.cmd.Process.Kill()
- }
- inst.Unlock()
- time.Sleep(3 * time.Second)
+ err := cmd.Wait()
+ close(done)
+ return err
}
-func (inst *Instance) CreateCommand(args ...string) *Command {
- if inst.log != nil {
- fmt.Fprintf(inst.log, "executing command: %v\n", args)
+func (inst *instance) Run(timeout time.Duration, command string) (<-chan []byte, <-chan error, error) {
+ outputC := make(chan []byte, 10)
+ errorC := make(chan error, 1)
+ inst.mu.Lock()
+ inst.outputB = nil
+ inst.outputC = outputC
+ inst.mu.Unlock()
+ signal := func(err error) {
+ time.Sleep(3 * time.Second) // wait for any pending output
+ inst.mu.Lock()
+ inst.outputB = nil
+ inst.outputC = nil
+ inst.mu.Unlock()
+ select {
+ case errorC <- err:
+ default:
+ }
}
- cmd := &Command{}
- cmd.done = make(chan struct{})
- cmd.cmd = exec.Command(args[0], args[1:]...)
- cmd.cmd.Stdout = inst.wpipe
- cmd.cmd.Stderr = inst.wpipe
- if err := cmd.cmd.Start(); err != nil {
- inst.Logf("failed to start command '%v': %v\n", args, err)
- cmd.failed = true
- close(cmd.done)
- return cmd
+ args := append(inst.sshArgs("-p"), "root@localhost", command)
+ cmd := exec.Command("ssh", args...)
+ cmd.Stdout = inst.wpipe
+ cmd.Stderr = inst.wpipe
+ if err := cmd.Start(); err != nil {
+ inst.mu.Lock()
+ inst.outputC = nil
+ inst.mu.Unlock()
+ return nil, nil, err
}
- inst.Lock()
- inst.cmds[cmd] = true
- inst.Unlock()
+ done := make(chan bool)
go func() {
- err := cmd.cmd.Wait()
- inst.Lock()
- delete(inst.cmds, cmd)
- inst.Unlock()
- if inst.log != nil {
- fmt.Fprintf(inst.log, "command '%v' exited: %v\n", args, err)
+ select {
+ case <-time.After(timeout):
+ signal(vm.TimeoutErr)
+ cmd.Process.Kill()
+ case <-done:
}
- cmd.failed = err != nil
- close(cmd.done)
}()
- return cmd
-}
-
-func (inst *Instance) CreateSSHCommand(args ...string) *Command {
- args1 := []string{"ssh", "-i", inst.Sshkey, "-p", strconv.Itoa(inst.Port),
- "-o", "ConnectionAttempts=10", "-o", "ConnectTimeout=10",
- "-o", "BatchMode=yes", "-o", "UserKnownHostsFile=/dev/null",
- "-o", "IdentitiesOnly=yes",
- "-o", "StrictHostKeyChecking=no", "root@localhost"}
- return inst.CreateCommand(append(args1, args...)...)
+ go func() {
+ err := cmd.Wait()
+ close(done)
+ signal(err)
+ }()
+ return outputC, errorC, nil
}
-func (inst *Instance) CreateSCPCommand(from, to string) *Command {
- return inst.CreateCommand("scp", "-i", inst.Sshkey, "-P", strconv.Itoa(inst.Port),
- "-o", "ConnectionAttempts=10", "-o", "ConnectTimeout=10",
- "-o", "BatchMode=yes", "-o", "UserKnownHostsFile=/dev/null",
+func (inst *instance) sshArgs(portArg string) []string {
+ return []string{
+ "-i", inst.cfg.Sshkey,
+ portArg, strconv.Itoa(inst.port),
+ "-o", "ConnectionAttempts=10",
+ "-o", "ConnectTimeout=10",
+ "-o", "BatchMode=yes",
+ "-o", "UserKnownHostsFile=/dev/null",
"-o", "IdentitiesOnly=yes",
"-o", "StrictHostKeyChecking=no",
- from, "root@localhost:"+to)
-}
-
-func (inst *Instance) Logf(str string, args ...interface{}) {
- fmt.Fprintf(inst.wpipe, str+"\n", args...)
- log.Printf("%v: "+str, append([]interface{}{inst.name}, args...)...)
-}
-
-func (cmd *Command) Wait(max time.Duration) bool {
- select {
- case <-cmd.done:
- return !cmd.failed
- case <-time.After(max):
- return false
- }
-}
-
-func (cmd *Command) Exited() bool {
- select {
- case <-cmd.done:
- return true
- default:
- return false
- }
-}
-
-var copySem sync.Mutex
-
-func copyFile(oldfn, newfn string) error {
- copySem.Lock()
- defer copySem.Unlock()
-
- oldf, err := os.Open(oldfn)
- if err != nil {
- return err
- }
- defer oldf.Close()
- newf, err := os.Create(newfn)
- if err != nil {
- return err
}
- defer newf.Close()
- _, err = io.Copy(newf, oldf)
- if err != nil {
- return err
- }
- return nil
}
diff --git a/vm/vm.go b/vm/vm.go
index 143100890..49ce4ac87 100644
--- a/vm/vm.go
+++ b/vm/vm.go
@@ -4,27 +4,41 @@
package vm
import (
+ "errors"
"fmt"
"regexp"
+ "time"
)
+// Instance represents a Linux VM or a remote physical machine.
type Instance interface {
- Run()
+ // Copy copies a hostSrc file to vmDst file (think of scp).
+ Copy(hostSrc, vmDst string) error
+ // Run runs cmd inside of the VM (think of ssh cmd).
+ // outc receives combined cmd and kernel console output.
+ // errc receives either command Wait return error or vm.TimeoutErr.
+ Run(timeout time.Duration, command string) (outc <-chan []byte, errc <-chan error, err error)
+ // HostAddr returns ip address of the host as seen by the VM.
+ HostAddr() string
+ // Close stops and destroys the VM.
+ Close()
}
type Config struct {
- Workdir string
- ManagerPort int
- Params []byte
- EnabledSyscalls string
- Suppressions []*regexp.Regexp
- NoCover bool
- NoDropPrivs bool
- Leak bool
- Procs int
+ Name string
+ Index int
+ Workdir string
+ Bin string
+ Kernel string
+ Cmdline string
+ Image string
+ Sshkey string
+ Cpu int
+ Mem int
+ Debug bool
}
-type ctorFunc func(cfg *Config, index int) (Instance, error)
+type ctorFunc func(cfg *Config) (Instance, error)
var ctors = make(map[string]ctorFunc)
@@ -32,10 +46,16 @@ func Register(typ string, ctor ctorFunc) {
ctors[typ] = ctor
}
-func Create(typ string, cfg *Config, index int) (Instance, error) {
+// Create creates and boots a new VM instance.
+func Create(typ string, cfg *Config) (Instance, error) {
ctor := ctors[typ]
if ctor == nil {
return nil, fmt.Errorf("unknown instance type '%v'", typ)
}
- return ctor(cfg, index)
+ return ctor(cfg)
}
+
+var (
+ CrashRe = regexp.MustCompile("\\[ cut here \\]|Kernel panic.*|BUG:.*|WARNING:.*|INFO:.*|unable to handle|general protection fault|UBSAN:.*")
+ TimeoutErr = errors.New("timeout")
+)