aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2018-05-03 14:41:34 +0200
committerDmitry Vyukov <dvyukov@google.com>2018-05-03 14:41:34 +0200
commit9fe5658a1b7320b756d02cf2075dc5c735f86ff4 (patch)
treec7960e2ef3f7cef6cb8e63df115c4e07574f7024
parent39302300d91e4875a2e2f475bb7938dfc86a5e20 (diff)
gometalinter: check dot imports
Update #538
-rw-r--r--.gometalinter.json2
-rw-r--r--pkg/db/db.go6
-rw-r--r--pkg/repro/repro.go4
-rw-r--r--pkg/rpctype/rpc.go4
-rw-r--r--syz-ci/jobs.go32
-rw-r--r--syz-ci/manager.go34
-rw-r--r--syz-ci/managercmd.go10
-rw-r--r--syz-ci/syz-ci.go6
-rw-r--r--syz-ci/syzupdater.go46
-rw-r--r--syz-ci/testing.go6
-rw-r--r--syz-fuzzer/fuzzer.go62
-rw-r--r--syz-fuzzer/proc.go32
-rw-r--r--syz-fuzzer/testing.go34
-rw-r--r--syz-hub/http.go12
-rw-r--r--syz-hub/hub.go36
-rw-r--r--syz-hub/state/state.go38
-rw-r--r--syz-manager/cover.go6
-rw-r--r--syz-manager/html.go10
-rw-r--r--syz-manager/manager.go190
-rw-r--r--tools/syz-execprog/execprog.go32
-rw-r--r--tools/syz-stress/stress.go24
-rw-r--r--vm/adb/adb.go26
-rw-r--r--vm/gce/gce.go22
-rw-r--r--vm/isolated/isolated.go36
-rw-r--r--vm/qemu/qemu.go8
25 files changed, 359 insertions, 359 deletions
diff --git a/.gometalinter.json b/.gometalinter.json
index 9c58185f5..d919247b6 100644
--- a/.gometalinter.json
+++ b/.gometalinter.json
@@ -25,6 +25,6 @@
"exported .* should have comment",
"comment on exported type",
"comment on .* should be of the form",
- "should not use dot imports"
+ "sys/(akaros|freebsd|fuchsia|linux|netbsd|test|windows)/(386|amd64|arm|arm64|ppc64le|32|64).go.* should not use dot imports"
]
}
diff --git a/pkg/db/db.go b/pkg/db/db.go
index ab609c513..4d48e1229 100644
--- a/pkg/db/db.go
+++ b/pkg/db/db.go
@@ -18,7 +18,7 @@ import (
"io/ioutil"
"os"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
)
@@ -178,7 +178,7 @@ func deserializeDB(r *bufio.Reader) (version uint64, records map[string]Record,
records = make(map[string]Record)
ver, err := deserializeHeader(r)
if err != nil {
- Logf(0, "failed to deserialize database header: %v", err)
+ log.Logf(0, "failed to deserialize database header: %v", err)
return
}
version = ver
@@ -188,7 +188,7 @@ func deserializeDB(r *bufio.Reader) (version uint64, records map[string]Record,
return
}
if err != nil {
- Logf(0, "failed to deserialize database record: %v", err)
+ log.Logf(0, "failed to deserialize database record: %v", err)
return
}
uncompacted++
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go
index b10f41af2..b2601793b 100644
--- a/pkg/repro/repro.go
+++ b/pkg/repro/repro.go
@@ -12,7 +12,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/csource"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/prog"
@@ -629,7 +629,7 @@ func (ctx *context) returnInstance(inst *instance) {
func (ctx *context) reproLog(level int, format string, args ...interface{}) {
prefix := fmt.Sprintf("reproducing crash '%v': ", ctx.crashTitle)
- Logf(level, prefix+format, args...)
+ log.Logf(level, prefix+format, args...)
ctx.stats.Log = append(ctx.stats.Log, []byte(fmt.Sprintf(format, args...)+"\n")...)
}
diff --git a/pkg/rpctype/rpc.go b/pkg/rpctype/rpc.go
index 18d2a04f5..939805212 100644
--- a/pkg/rpctype/rpc.go
+++ b/pkg/rpctype/rpc.go
@@ -9,7 +9,7 @@ import (
"net/rpc"
"time"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
)
type RPCServer struct {
@@ -35,7 +35,7 @@ func (serv *RPCServer) Serve() {
for {
conn, err := serv.ln.Accept()
if err != nil {
- Logf(0, "failed to accept an rpc connection: %v", err)
+ log.Logf(0, "failed to accept an rpc connection: %v", err)
continue
}
conn.(*net.TCPConn).SetKeepAlive(true)
diff --git a/syz-ci/jobs.go b/syz-ci/jobs.go
index f334e805b..fbbfdbaea 100644
--- a/syz-ci/jobs.go
+++ b/syz-ci/jobs.go
@@ -15,7 +15,7 @@ import (
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/git"
"github.com/google/syzkaller/pkg/kernel"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/prog"
@@ -55,7 +55,7 @@ func (jp *JobProcessor) loop(stop chan struct{}) {
case <-ticker.C:
jp.poll()
case <-stop:
- Logf(0, "job loop stopped")
+ log.Logf(0, "job loop stopped")
return
}
}
@@ -89,10 +89,10 @@ func (jp *JobProcessor) poll() {
req: req,
mgr: mgr,
}
- Logf(0, "starting job %v for manager %v on %v/%v",
+ log.Logf(0, "starting job %v for manager %v on %v/%v",
req.ID, req.Manager, req.KernelRepo, req.KernelBranch)
resp := jp.process(job)
- Logf(0, "done job %v: commit %v, crash %q, error: %s",
+ log.Logf(0, "done job %v: commit %v, crash %q, error: %s",
resp.ID, resp.Build.KernelCommit, resp.CrashTitle, resp.Error)
if err := jp.dash.JobDone(resp); err != nil {
jp.Errorf("failed to mark job as done: %v", err)
@@ -163,7 +163,7 @@ func (jp *JobProcessor) process(job *Job) *dashapi.JobDoneReq {
if err = jp.test(job); err == nil {
break
}
- Logf(0, "job: testing failed, trying once again\n%v", err)
+ log.Logf(0, "job: testing failed, trying once again\n%v", err)
}
if err != nil {
job.resp.Error = []byte(err.Error())
@@ -197,13 +197,13 @@ func (jp *JobProcessor) buildImage(job *Job) error {
return fmt.Errorf("failed to create temp dir: %v", err)
}
- Logf(0, "job: fetching syzkaller on %v...", req.SyzkallerCommit)
+ log.Logf(0, "job: fetching syzkaller on %v...", req.SyzkallerCommit)
_, err := git.CheckoutCommit(syzkallerDir, jp.syzkallerRepo, req.SyzkallerCommit)
if err != nil {
return fmt.Errorf("failed to checkout syzkaller repo: %v", err)
}
- Logf(0, "job: building syzkaller...")
+ log.Logf(0, "job: building syzkaller...")
cmd := osutil.Command("make", "target")
cmd.Dir = syzkallerDir
cmd.Env = append([]string{}, os.Environ()...)
@@ -218,7 +218,7 @@ func (jp *JobProcessor) buildImage(job *Job) error {
}
resp.Build.SyzkallerCommit = req.SyzkallerCommit
- Logf(0, "job: fetching kernel...")
+ log.Logf(0, "job: fetching kernel...")
var kernelCommit *git.Commit
if git.CheckCommitHash(req.KernelBranch) {
kernelCommit, err = git.CheckoutCommit(kernelDir, req.KernelRepo, req.KernelBranch)
@@ -247,7 +247,7 @@ func (jp *JobProcessor) buildImage(job *Job) error {
}
}
- Logf(0, "job: building kernel...")
+ log.Logf(0, "job: building kernel...")
configFile := filepath.Join(dir, "kernel.config")
if err := osutil.WriteFile(configFile, req.KernelConfig); err != nil {
return fmt.Errorf("failed to write temp file: %v", err)
@@ -261,7 +261,7 @@ func (jp *JobProcessor) buildImage(job *Job) error {
}
resp.Build.KernelConfig = kernelConfig
- Logf(0, "job: creating image...")
+ log.Logf(0, "job: creating image...")
image := filepath.Join(imageDir, "image")
key := filepath.Join(imageDir, "key")
err = kernel.CreateImage(kernelDir, mgr.mgrcfg.Userspace,
@@ -294,7 +294,7 @@ func (jp *JobProcessor) buildImage(job *Job) error {
func (jp *JobProcessor) test(job *Job) error {
req, mgrcfg := job.req, job.mgrcfg
- Logf(0, "job: booting VM...")
+ log.Logf(0, "job: booting VM...")
inst, reporter, rep, err := bootInstance(mgrcfg)
if err != nil {
return err
@@ -306,7 +306,7 @@ func (jp *JobProcessor) test(job *Job) error {
}
defer inst.Close()
- Logf(0, "job: testing instance...")
+ log.Logf(0, "job: testing instance...")
rep, err = testInstance(inst, reporter, mgrcfg)
if err != nil {
return err
@@ -317,7 +317,7 @@ func (jp *JobProcessor) test(job *Job) error {
return fmt.Errorf("%v\n\n%s\n\n%s", rep.Title, rep.Report, rep.Output)
}
- Logf(0, "job: copying binaries...")
+ log.Logf(0, "job: copying binaries...")
execprogBin, err := inst.Copy(mgrcfg.SyzExecprogBin)
if err != nil {
return fmt.Errorf("failed to copy test binary to VM: %v", err)
@@ -335,7 +335,7 @@ func (jp *JobProcessor) test(job *Job) error {
return fmt.Errorf("failed to copy to VM: %v", err)
}
- Logf(0, "job: testing syzkaller program...")
+ log.Logf(0, "job: testing syzkaller program...")
opts, err := csource.DeserializeOptions(req.ReproOpts)
if err != nil {
return err
@@ -360,7 +360,7 @@ func (jp *JobProcessor) test(job *Job) error {
}
if len(req.ReproC) != 0 {
- Logf(0, "job: testing C program...")
+ log.Logf(0, "job: testing C program...")
cFile := filepath.Join(mgrcfg.Workdir, "repro.c")
if err := osutil.WriteFile(cFile, req.ReproC); err != nil {
return fmt.Errorf("failed to write temp file: %v", err)
@@ -408,7 +408,7 @@ func (jp *JobProcessor) testProgram(job *Job, inst *vm.Instance, command string,
// Errorf logs non-fatal error and sends it to dashboard.
func (jp *JobProcessor) Errorf(msg string, args ...interface{}) {
- Logf(0, "job: "+msg, args...)
+ log.Logf(0, "job: "+msg, args...)
if jp.dash != nil {
jp.dash.LogError(jp.name, msg, args...)
}
diff --git a/syz-ci/manager.go b/syz-ci/manager.go
index f916fa5f7..b7fc9ab72 100644
--- a/syz-ci/manager.go
+++ b/syz-ci/manager.go
@@ -15,7 +15,7 @@ import (
"github.com/google/syzkaller/pkg/git"
"github.com/google/syzkaller/pkg/hash"
"github.com/google/syzkaller/pkg/kernel"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/syz-manager/mgrconfig"
@@ -62,7 +62,7 @@ type Manager struct {
func createManager(cfg *Config, mgrcfg *ManagerConfig, stop chan struct{}) *Manager {
dir := osutil.Abs(filepath.Join("managers", mgrcfg.Name))
if err := osutil.MkdirAll(dir); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
if mgrcfg.Repo_Alias == "" {
mgrcfg.Repo_Alias = mgrcfg.Repo
@@ -76,25 +76,25 @@ func createManager(cfg *Config, mgrcfg *ManagerConfig, stop chan struct{}) *Mana
// Assume compiler and config don't change underneath us.
compilerID, err := kernel.CompilerIdentity(mgrcfg.Compiler)
if err != nil {
- Fatal(err)
+ log.Fatal(err)
}
configData, err := ioutil.ReadFile(mgrcfg.Kernel_Config)
if err != nil {
- Fatal(err)
+ log.Fatal(err)
}
syzkallerCommit, _ := readTag(filepath.FromSlash("syzkaller/current/tag"))
if syzkallerCommit == "" {
- Fatalf("no tag in syzkaller/current/tag")
+ log.Fatalf("no tag in syzkaller/current/tag")
}
// Prepare manager config skeleton (other fields are filled in writeConfig).
managercfg := mgrconfig.DefaultValues()
if err := config.LoadData(mgrcfg.Manager_Config, managercfg); err != nil {
- Fatalf("failed to load manager %v config: %v", mgrcfg.Name, err)
+ log.Fatalf("failed to load manager %v config: %v", mgrcfg.Name, err)
}
managercfg.TargetOS, managercfg.TargetVMArch, managercfg.TargetArch, err = mgrconfig.SplitTarget(managercfg.Target)
if err != nil {
- Fatalf("failed to load manager %v config: %v", mgrcfg.Name, err)
+ log.Fatalf("failed to load manager %v config: %v", mgrcfg.Name, err)
}
managercfg.Name = cfg.Name + "-" + mgrcfg.Name
@@ -130,12 +130,12 @@ func (mgr *Manager) loop() {
if latestInfo != nil && time.Since(latestInfo.Time) < kernelRebuildPeriod/2 {
// If we have a reasonably fresh build,
// start manager straight away and don't rebuild kernel for a while.
- Logf(0, "%v: using latest image built on %v", mgr.name, latestInfo.KernelCommit)
+ log.Logf(0, "%v: using latest image built on %v", mgr.name, latestInfo.KernelCommit)
managerRestartTime = latestInfo.Time
nextBuildTime = time.Now().Add(kernelRebuildPeriod)
mgr.restartManager()
} else if latestInfo != nil {
- Logf(0, "%v: latest image is on %v", mgr.name, latestInfo.KernelCommit)
+ log.Logf(0, "%v: latest image is on %v", mgr.name, latestInfo.KernelCommit)
}
ticker := time.NewTicker(buildRetryPeriod)
@@ -149,7 +149,7 @@ loop:
if err != nil {
mgr.Errorf("failed to poll: %v", err)
} else {
- Logf(0, "%v: poll: %v", mgr.name, commit.Hash)
+ log.Logf(0, "%v: poll: %v", mgr.name, commit.Hash)
if commit.Hash != lastCommit &&
(latestInfo == nil ||
commit.Hash != latestInfo.KernelCommit ||
@@ -158,11 +158,11 @@ loop:
lastCommit = commit.Hash
select {
case kernelBuildSem <- struct{}{}:
- Logf(0, "%v: building kernel...", mgr.name)
+ log.Logf(0, "%v: building kernel...", mgr.name)
if err := mgr.build(); err != nil {
- Logf(0, "%v: %v", mgr.name, err)
+ log.Logf(0, "%v: %v", mgr.name, err)
} else {
- Logf(0, "%v: build successful, [re]starting manager", mgr.name)
+ log.Logf(0, "%v: build successful, [re]starting manager", mgr.name)
rebuildAfter = kernelRebuildPeriod
latestInfo = mgr.checkLatest()
if latestInfo == nil {
@@ -200,7 +200,7 @@ loop:
mgr.cmd.Close()
mgr.cmd = nil
}
- Logf(0, "%v: stopped", mgr.name)
+ log.Logf(0, "%v: stopped", mgr.name)
}
// BuildInfo characterizes a kernel build.
@@ -347,7 +347,7 @@ func (mgr *Manager) restartManager() {
}
func (mgr *Manager) testImage(imageDir string, info *BuildInfo) error {
- Logf(0, "%v: testing image...", mgr.name)
+ log.Logf(0, "%v: testing image...", mgr.name)
mgrcfg, err := mgr.createTestConfig(imageDir, info)
if err != nil {
return fmt.Errorf("failed to create manager config: %v", err)
@@ -391,7 +391,7 @@ func (mgr *Manager) testImage(imageDir string, info *BuildInfo) error {
func (mgr *Manager) reportBuildError(rep *report.Report, info *BuildInfo, imageDir string) error {
if mgr.dash == nil {
- Logf(0, "%v: image testing failed: %v\n\n%s\n\n%s\n",
+ log.Logf(0, "%v: image testing failed: %v\n\n%s\n\n%s\n",
mgr.name, rep.Title, rep.Report, rep.Output)
return nil
}
@@ -571,7 +571,7 @@ func (mgr *Manager) pollCommits(buildCommit string) ([]string, []dashapi.FixComm
// Errorf logs non-fatal error and sends it to dashboard.
func (mgr *Manager) Errorf(msg string, args ...interface{}) {
- Logf(0, mgr.name+": "+msg, args...)
+ log.Logf(0, mgr.name+": "+msg, args...)
if mgr.dash != nil {
mgr.dash.LogError(mgr.name, msg, args...)
}
diff --git a/syz-ci/managercmd.go b/syz-ci/managercmd.go
index 143eb8011..74f99307f 100644
--- a/syz-ci/managercmd.go
+++ b/syz-ci/managercmd.go
@@ -9,7 +9,7 @@ import (
"syscall"
"time"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
)
@@ -88,7 +88,7 @@ func (mc *ManagerCmd) loop() {
mc.errorf("failed to start manager: %v", err)
cmd = nil
} else {
- Logf(1, "%v: started manager", mc.name)
+ log.Logf(1, "%v: started manager", mc.name)
go func() {
stopped <- cmd.Wait()
}()
@@ -98,7 +98,7 @@ func (mc *ManagerCmd) loop() {
} else {
// cmd is running
if closing == nil && time.Since(interrupted) > interruptTimeout {
- Logf(1, "%v: killing manager", mc.name)
+ log.Logf(1, "%v: killing manager", mc.name)
cmd.Process.Kill()
interrupted = time.Now()
}
@@ -108,7 +108,7 @@ func (mc *ManagerCmd) loop() {
case <-closing:
closing = nil
if cmd != nil {
- Logf(1, "%v: stopping manager", mc.name)
+ log.Logf(1, "%v: stopping manager", mc.name)
cmd.Process.Signal(syscall.SIGINT)
interrupted = time.Now()
}
@@ -120,7 +120,7 @@ func (mc *ManagerCmd) loop() {
mc.errorf("manager exited unexpectedly: %v", err)
}
cmd = nil
- Logf(1, "%v: manager exited with %v", mc.name, err)
+ log.Logf(1, "%v: manager exited with %v", mc.name, err)
case <-ticker1.C:
case <-ticker2.C:
}
diff --git a/syz-ci/syz-ci.go b/syz-ci/syz-ci.go
index 7a404279b..9c68b7eb8 100644
--- a/syz-ci/syz-ci.go
+++ b/syz-ci/syz-ci.go
@@ -60,7 +60,7 @@ import (
"sync"
"github.com/google/syzkaller/pkg/config"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/syz-manager/mgrconfig"
)
@@ -100,10 +100,10 @@ type ManagerConfig struct {
func main() {
flag.Parse()
- EnableLogCaching(1000, 1<<20)
+ log.EnableLogCaching(1000, 1<<20)
cfg, err := loadConfig(*flagConfig)
if err != nil {
- Fatalf("failed to load config: %v", err)
+ log.Fatalf("failed to load config: %v", err)
}
shutdownPending := make(chan struct{})
diff --git a/syz-ci/syzupdater.go b/syz-ci/syzupdater.go
index 3ecd6bf5c..4cdb2d73d 100644
--- a/syz-ci/syzupdater.go
+++ b/syz-ci/syzupdater.go
@@ -14,7 +14,7 @@ import (
"github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/git"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/syz-manager/mgrconfig"
)
@@ -46,7 +46,7 @@ type SyzUpdater struct {
func NewSyzUpdater(cfg *Config) *SyzUpdater {
wd, err := os.Getwd()
if err != nil {
- Fatalf("failed to get wd: %v", err)
+ log.Fatalf("failed to get wd: %v", err)
}
bin := os.Args[0]
if !filepath.IsAbs(bin) {
@@ -55,7 +55,7 @@ func NewSyzUpdater(cfg *Config) *SyzUpdater {
bin = filepath.Clean(bin)
exe := filepath.Base(bin)
if wd != filepath.Dir(bin) {
- Fatalf("%v executable must be in cwd (it will be overwritten on update)", exe)
+ log.Fatalf("%v executable must be in cwd (it will be overwritten on update)", exe)
}
gopath := filepath.Join(wd, "gopath")
@@ -75,11 +75,11 @@ func NewSyzUpdater(cfg *Config) *SyzUpdater {
for _, mgr := range cfg.Managers {
mgrcfg := new(mgrconfig.Config)
if err := config.LoadData(mgr.Manager_Config, mgrcfg); err != nil {
- Fatalf("failed to load manager %v config: %v", mgr.Name, err)
+ log.Fatalf("failed to load manager %v config: %v", mgr.Name, err)
}
os, vmarch, arch, err := mgrconfig.SplitTarget(mgrcfg.Target)
if err != nil {
- Fatalf("failed to load manager %v config: %v", mgr.Name, err)
+ log.Fatalf("failed to load manager %v config: %v", mgr.Name, err)
}
targets[os+"/"+vmarch+"/"+arch] = true
files[fmt.Sprintf("bin/%v_%v/syz-fuzzer", os, vmarch)] = true
@@ -114,17 +114,17 @@ func (upd *SyzUpdater) UpdateOnStart(shutdown chan struct{}) {
latestTag := upd.checkLatest()
if exeTag == latestTag && time.Since(exeMod) < time.Minute {
// Have a fresh up-to-date build, probably just restarted.
- Logf(0, "current executable is up-to-date (%v)", exeTag)
+ log.Logf(0, "current executable is up-to-date (%v)", exeTag)
if err := osutil.LinkFiles(upd.latestDir, upd.currentDir, upd.syzFiles); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
return
}
if exeTag == "" {
- Logf(0, "current executable is bootstrap")
+ log.Logf(0, "current executable is bootstrap")
} else {
- Logf(0, "current executable is on %v", exeTag)
- Logf(0, "latest syzkaller build is on %v", latestTag)
+ log.Logf(0, "current executable is on %v", exeTag)
+ log.Logf(0, "latest syzkaller build is on %v", latestTag)
}
// No syzkaller build or executable is stale.
@@ -135,9 +135,9 @@ func (upd *SyzUpdater) UpdateOnStart(shutdown chan struct{}) {
if latestTag != "" {
// The build was successful or we had the latest build from previous runs.
// Either way, use the latest build.
- Logf(0, "using syzkaller built on %v", latestTag)
+ log.Logf(0, "using syzkaller built on %v", latestTag)
if err := osutil.LinkFiles(upd.latestDir, upd.currentDir, upd.syzFiles); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
if exeTag != latestTag {
upd.UpdateAndRestart()
@@ -146,7 +146,7 @@ func (upd *SyzUpdater) UpdateOnStart(shutdown chan struct{}) {
}
// No good build at all, try again later.
- Logf(0, "retrying in %v", buildRetryPeriod)
+ log.Logf(0, "retrying in %v", buildRetryPeriod)
select {
case <-time.After(buildRetryPeriod):
case <-shutdown:
@@ -168,38 +168,38 @@ func (upd *SyzUpdater) WaitForUpdate() {
}
time.Sleep(buildRetryPeriod)
}
- Logf(0, "syzkaller: update available, restarting")
+ log.Logf(0, "syzkaller: update available, restarting")
}
// UpdateAndRestart updates and restarts the current executable.
// Does not return.
func (upd *SyzUpdater) UpdateAndRestart() {
- Logf(0, "restarting executable for update")
+ log.Logf(0, "restarting executable for update")
latestBin := filepath.Join(upd.latestDir, "bin", upd.exe)
latestTag := filepath.Join(upd.latestDir, "tag")
if err := osutil.CopyFile(latestBin, upd.exe); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
if err := osutil.CopyFile(latestTag, upd.exe+".tag"); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
if err := syscall.Exec(upd.exe, os.Args, os.Environ()); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
- Fatalf("not reachable")
+ log.Fatalf("not reachable")
}
func (upd *SyzUpdater) pollAndBuild(lastCommit string) string {
commit, err := git.Poll(upd.syzkallerDir, upd.repo, upd.branch)
if err != nil {
- Logf(0, "syzkaller: failed to poll: %v", err)
+ log.Logf(0, "syzkaller: failed to poll: %v", err)
} else {
- Logf(0, "syzkaller: poll: %v (%v)", commit.Hash, commit.Title)
+ log.Logf(0, "syzkaller: poll: %v (%v)", commit.Hash, commit.Title)
if lastCommit != commit.Hash {
- Logf(0, "syzkaller: building ...")
+ log.Logf(0, "syzkaller: building ...")
lastCommit = commit.Hash
if err := upd.build(); err != nil {
- Logf(0, "syzkaller: %v", err)
+ log.Logf(0, "syzkaller: %v", err)
}
}
}
diff --git a/syz-ci/testing.go b/syz-ci/testing.go
index f1040155f..6806f371d 100644
--- a/syz-ci/testing.go
+++ b/syz-ci/testing.go
@@ -10,7 +10,7 @@ import (
"sync/atomic"
"time"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/syz-manager/mgrconfig"
"github.com/google/syzkaller/vm"
@@ -42,7 +42,7 @@ func bootInstance(mgrcfg *mgrconfig.Config) (*vm.Instance, report.Reporter, *rep
}
if err := reporter.Symbolize(rep); err != nil {
// TODO(dvyukov): send such errors to dashboard.
- Logf(0, "failed to symbolize report: %v", err)
+ log.Logf(0, "failed to symbolize report: %v", err)
}
return nil, nil, rep, nil
}
@@ -92,7 +92,7 @@ func testInstance(inst *vm.Instance, reporter report.Reporter, mgrcfg *mgrconfig
if rep != nil {
if err := reporter.Symbolize(rep); err != nil {
// TODO(dvyukov): send such errors to dashboard.
- Logf(0, "failed to symbolize report: %v", err)
+ log.Logf(0, "failed to symbolize report: %v", err)
}
return rep, nil
}
diff --git a/syz-fuzzer/fuzzer.go b/syz-fuzzer/fuzzer.go
index 245a65ca1..76ccd64e5 100644
--- a/syz-fuzzer/fuzzer.go
+++ b/syz-fuzzer/fuzzer.go
@@ -20,9 +20,9 @@ import (
"github.com/google/syzkaller/pkg/hash"
"github.com/google/syzkaller/pkg/host"
"github.com/google/syzkaller/pkg/ipc"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
- . "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/rpctype"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys"
@@ -39,7 +39,7 @@ type Fuzzer struct {
needPoll chan struct{}
choiceTable *prog.ChoiceTable
stats [StatCount]uint64
- manager *RPCClient
+ manager *rpctype.RPCClient
target *prog.Target
faultInjectionEnabled bool
@@ -122,11 +122,11 @@ func main() {
fmt.Fprintf(os.Stderr, "-output flag must be one of none/stdout/dmesg/file\n")
os.Exit(1)
}
- Logf(0, "fuzzer started")
+ log.Logf(0, "fuzzer started")
target, err := prog.GetTarget(runtime.GOOS, *flagArch)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
config, execOpts, err := ipc.DefaultConfig()
@@ -145,7 +145,7 @@ func main() {
go func() {
// Handles graceful preemption on GCE.
<-shutdown
- Logf(0, "SYZ-FUZZER: PREEMPTED")
+ log.Logf(0, "SYZ-FUZZER: PREEMPTED")
os.Exit(1)
}()
@@ -157,16 +157,16 @@ func main() {
if *flagPprof != "" {
go func() {
err := http.ListenAndServe(*flagPprof, nil)
- Fatalf("failed to serve pprof profiles: %v", err)
+ log.Fatalf("failed to serve pprof profiles: %v", err)
}()
} else {
runtime.MemProfileRate = 0
}
- Logf(0, "dialing manager at %v", *flagManager)
- a := &ConnectArgs{Name: *flagName}
- r := &ConnectRes{}
- if err := RPCCall(*flagManager, "Manager.Connect", a, r); err != nil {
+ log.Logf(0, "dialing manager at %v", *flagManager)
+ a := &rpctype.ConnectArgs{Name: *flagName}
+ r := &rpctype.ConnectRes{}
+ if err := rpctype.RPCCall(*flagManager, "Manager.Connect", a, r); err != nil {
panic(err)
}
calls, disabled := buildCallList(target, r.EnabledCalls, sandbox)
@@ -193,7 +193,7 @@ func main() {
coverageEnabled := config.Flags&ipc.FlagSignal != 0
kcov, comparisonTracingEnabled := checkCompsSupported()
- Logf(0, "kcov=%v, comps=%v", kcov, comparisonTracingEnabled)
+ log.Logf(0, "kcov=%v, comps=%v", kcov, comparisonTracingEnabled)
if r.NeedCheck {
out, err := osutil.RunCmd(time.Minute, "", config.Executor, "version")
if err != nil {
@@ -203,7 +203,7 @@ func main() {
if len(vers) != 4 {
panic(fmt.Sprintf("bad executor version: %q", string(out)))
}
- a := &CheckArgs{
+ a := &rpctype.CheckArgs{
Name: *flagName,
UserNamespaces: osutil.IsExist("/proc/self/ns/user"),
FuzzerGitRev: sys.GitRevision,
@@ -223,7 +223,7 @@ func main() {
for c := range calls {
a.Calls = append(a.Calls, c.Name)
}
- if err := RPCCall(*flagManager, "Manager.Check", a, nil); err != nil {
+ if err := rpctype.RPCCall(*flagManager, "Manager.Check", a, nil); err != nil {
panic(err)
}
}
@@ -232,7 +232,7 @@ func main() {
// So we do the call on a transient connection, free all memory and reconnect.
// The rest of rpc requests have bounded size.
debug.FreeOSMemory()
- manager, err := NewRPCClient(*flagManager)
+ manager, err := rpctype.NewRPCClient(*flagManager)
if err != nil {
panic(err)
}
@@ -288,7 +288,7 @@ func main() {
for pid := 0; pid < *flagProcs; pid++ {
proc, err := newProc(fuzzer, pid)
if err != nil {
- Fatalf("failed to create proc: %v", err)
+ log.Fatalf("failed to create proc: %v", err)
}
fuzzer.procs = append(fuzzer.procs, proc)
go proc.loop()
@@ -311,7 +311,7 @@ func (fuzzer *Fuzzer) pollLoop() {
}
if fuzzer.outputType != OutputStdout && time.Since(lastPrint) > 10*time.Second {
// Keep-alive for manager.
- Logf(0, "alive, executed %v", execTotal)
+ log.Logf(0, "alive, executed %v", execTotal)
lastPrint = time.Now()
}
if poll || time.Since(lastPoll) > 10*time.Second {
@@ -320,7 +320,7 @@ func (fuzzer *Fuzzer) pollLoop() {
continue
}
- a := &PollArgs{
+ a := &rpctype.PollArgs{
Name: fuzzer.name,
NeedCandidates: needCandidates,
Stats: make(map[string]uint64),
@@ -337,12 +337,12 @@ func (fuzzer *Fuzzer) pollLoop() {
execTotal += v
}
- r := &PollRes{}
+ r := &rpctype.PollRes{}
if err := fuzzer.manager.Call("Manager.Poll", a, r); err != nil {
panic(err)
}
maxSignal := r.MaxSignal.Deserialize()
- Logf(1, "poll: candidates=%v inputs=%v signal=%v",
+ log.Logf(1, "poll: candidates=%v inputs=%v signal=%v",
len(r.Candidates), len(r.NewInputs), maxSignal.Len())
fuzzer.addMaxSignal(maxSignal)
for _, inp := range r.NewInputs {
@@ -381,24 +381,24 @@ func (fuzzer *Fuzzer) pollLoop() {
}
}
-func buildCallList(target *prog.Target, enabledCalls []int, sandbox string) (map[*prog.Syscall]bool, []SyscallReason) {
+func buildCallList(target *prog.Target, enabledCalls []int, sandbox string) (map[*prog.Syscall]bool, []rpctype.SyscallReason) {
calls := make(map[*prog.Syscall]bool)
for _, n := range enabledCalls {
if n >= len(target.Syscalls) {
- Fatalf("invalid enabled syscall: %v", n)
+ log.Fatalf("invalid enabled syscall: %v", n)
}
calls[target.Syscalls[n]] = true
}
- var disabled []SyscallReason
+ var disabled []rpctype.SyscallReason
_, unsupported, err := host.DetectSupportedSyscalls(target, sandbox)
if err != nil {
- Fatalf("failed to detect host supported syscalls: %v", err)
+ log.Fatalf("failed to detect host supported syscalls: %v", err)
}
for c := range calls {
if reason, ok := unsupported[c]; ok {
- Logf(1, "unsupported syscall: %v: %v", c.Name, reason)
- disabled = append(disabled, SyscallReason{
+ log.Logf(1, "unsupported syscall: %v: %v", c.Name, reason)
+ disabled = append(disabled, rpctype.SyscallReason{
Name: c.Name,
Reason: reason,
})
@@ -408,8 +408,8 @@ func buildCallList(target *prog.Target, enabledCalls []int, sandbox string) (map
_, unsupported = target.TransitivelyEnabledCalls(calls)
for c := range calls {
if reason, ok := unsupported[c]; ok {
- Logf(1, "transitively unsupported: %v: %v", c.Name, reason)
- disabled = append(disabled, SyscallReason{
+ log.Logf(1, "transitively unsupported: %v: %v", c.Name, reason)
+ disabled = append(disabled, rpctype.SyscallReason{
Name: c.Name,
Reason: reason,
})
@@ -419,8 +419,8 @@ func buildCallList(target *prog.Target, enabledCalls []int, sandbox string) (map
return calls, disabled
}
-func (fuzzer *Fuzzer) sendInputToManager(inp RPCInput) {
- a := &NewInputArgs{
+func (fuzzer *Fuzzer) sendInputToManager(inp rpctype.RPCInput) {
+ a := &rpctype.NewInputArgs{
Name: fuzzer.name,
RPCInput: inp,
}
@@ -429,7 +429,7 @@ func (fuzzer *Fuzzer) sendInputToManager(inp RPCInput) {
}
}
-func (fuzzer *Fuzzer) addInputFromAnotherFuzzer(inp RPCInput) {
+func (fuzzer *Fuzzer) addInputFromAnotherFuzzer(inp rpctype.RPCInput) {
if !fuzzer.coverageEnabled {
panic("should not be called when coverage is disabled")
}
diff --git a/syz-fuzzer/proc.go b/syz-fuzzer/proc.go
index bcbbd4f2c..20f11fb2e 100644
--- a/syz-fuzzer/proc.go
+++ b/syz-fuzzer/proc.go
@@ -16,8 +16,8 @@ import (
"github.com/google/syzkaller/pkg/cover"
"github.com/google/syzkaller/pkg/hash"
"github.com/google/syzkaller/pkg/ipc"
- . "github.com/google/syzkaller/pkg/log"
- . "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/rpctype"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/prog"
)
@@ -85,20 +85,20 @@ func (proc *Proc) loop() {
if len(corpus) == 0 || i%100 == 0 {
// Generate a new prog.
p := proc.fuzzer.target.Generate(proc.rnd, programLength, ct)
- Logf(1, "#%v: generated", proc.pid)
+ log.Logf(1, "#%v: generated", proc.pid)
proc.execute(proc.execOpts, p, ProgNormal, StatGenerate)
} else {
// Mutate an existing prog.
p := corpus[proc.rnd.Intn(len(corpus))].Clone()
p.Mutate(proc.rnd, programLength, ct, corpus)
- Logf(1, "#%v: mutated", proc.pid)
+ log.Logf(1, "#%v: mutated", proc.pid)
proc.execute(proc.execOpts, p, ProgNormal, StatFuzz)
}
}
}
func (proc *Proc) triageInput(item *WorkTriage) {
- Logf(1, "#%v: triaging type=%x", proc.pid, item.flags)
+ log.Logf(1, "#%v: triaging type=%x", proc.pid, item.flags)
if !proc.fuzzer.coverageEnabled {
panic("should not be called when coverage is disabled")
}
@@ -109,7 +109,7 @@ func (proc *Proc) triageInput(item *WorkTriage) {
if newSignal.Empty() {
return
}
- Logf(3, "triaging input for %v (new signal=%v)", call.Meta.CallName, newSignal.Len())
+ log.Logf(3, "triaging input for %v (new signal=%v)", call.Meta.CallName, newSignal.Len())
var inputCover cover.Cover
const (
signalRuns = 3
@@ -165,8 +165,8 @@ func (proc *Proc) triageInput(item *WorkTriage) {
data := item.p.Serialize()
sig := hash.Hash(data)
- Logf(2, "added new input for %v to corpus:\n%s", call.Meta.CallName, data)
- proc.fuzzer.sendInputToManager(RPCInput{
+ log.Logf(2, "added new input for %v to corpus:\n%s", call.Meta.CallName, data)
+ proc.fuzzer.sendInputToManager(rpctype.RPCInput{
Call: call.Meta.CallName,
Prog: data,
Signal: inputSignal.Serialize(),
@@ -191,14 +191,14 @@ func (proc *Proc) smashInput(item *WorkSmash) {
for i := 0; i < 100; i++ {
p := item.p.Clone()
p.Mutate(proc.rnd, programLength, proc.fuzzer.choiceTable, corpus)
- Logf(1, "#%v: smash mutated", proc.pid)
+ log.Logf(1, "#%v: smash mutated", proc.pid)
proc.execute(proc.execOpts, p, ProgNormal, StatSmash)
}
}
func (proc *Proc) failCall(p *prog.Prog, call int) {
for nth := 0; nth < 100; nth++ {
- Logf(1, "#%v: injecting fault into call %v/%v", proc.pid, call, nth)
+ log.Logf(1, "#%v: injecting fault into call %v/%v", proc.pid, call, nth)
opts := *proc.execOpts
opts.Flags |= ipc.FlagInjectFault
opts.FaultCall = call
@@ -211,7 +211,7 @@ func (proc *Proc) failCall(p *prog.Prog, call int) {
}
func (proc *Proc) executeHintSeed(p *prog.Prog, call int) {
- Logf(1, "#%v: collecting comparisons", proc.pid)
+ log.Logf(1, "#%v: collecting comparisons", proc.pid)
// First execute the original program to dump comparisons from KCOV.
info := proc.execute(proc.execOptsComps, p, ProgNormal, StatSeed)
if info == nil {
@@ -222,7 +222,7 @@ func (proc *Proc) executeHintSeed(p *prog.Prog, call int) {
// a syscall argument and a comparison operand.
// Execute each of such mutants to check if it gives new coverage.
p.MutateWithHints(call, info[call].Comps, func(p *prog.Prog) {
- Logf(1, "#%v: executing comparison hint", proc.pid)
+ log.Logf(1, "#%v: executing comparison hint", proc.pid)
proc.execute(proc.execOpts, p, ProgNormal, StatHint)
})
}
@@ -262,7 +262,7 @@ retry:
output, info, failed, hanged, err := proc.env.Exec(opts, p)
if failed {
// BUG in output should be recognized by manager.
- Logf(0, "BUG: executor-detected bug:\n%s", output)
+ log.Logf(0, "BUG: executor-detected bug:\n%s", output)
// Don't return any cover so that the input is not added to corpus.
return nil
}
@@ -271,12 +271,12 @@ retry:
panic(err)
}
try++
- Logf(4, "fuzzer detected executor failure='%v', retrying #%d\n", err, (try + 1))
+ log.Logf(4, "fuzzer detected executor failure='%v', retrying #%d\n", err, (try + 1))
debug.FreeOSMemory()
time.Sleep(time.Second)
goto retry
}
- Logf(2, "result failed=%v hanged=%v: %v\n", failed, hanged, string(output))
+ log.Logf(2, "result failed=%v hanged=%v: %v\n", failed, hanged, string(output))
return info
}
@@ -296,7 +296,7 @@ func (proc *Proc) logProgram(opts *ipc.ExecOpts, p *prog.Prog) {
switch proc.fuzzer.outputType {
case OutputStdout:
proc.fuzzer.logMu.Lock()
- Logf(0, "executing program %v%v:\n%s\n", proc.pid, strOpts, data)
+ log.Logf(0, "executing program %v%v:\n%s\n", proc.pid, strOpts, data)
proc.fuzzer.logMu.Unlock()
case OutputDmesg:
fd, err := syscall.Open("/dev/kmsg", syscall.O_WRONLY, 0)
diff --git a/syz-fuzzer/testing.go b/syz-fuzzer/testing.go
index 57906c407..604f33051 100644
--- a/syz-fuzzer/testing.go
+++ b/syz-fuzzer/testing.go
@@ -8,64 +8,64 @@ import (
"github.com/google/syzkaller/pkg/host"
"github.com/google/syzkaller/pkg/ipc"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
)
func testImage(hostAddr string, target *prog.Target, sandbox string) {
- Logf(0, "connecting to host at %v", hostAddr)
+ log.Logf(0, "connecting to host at %v", hostAddr)
conn, err := net.Dial("tcp", hostAddr)
if err != nil {
- Fatalf("failed to connect: %v", err)
+ log.Fatalf("failed to connect: %v", err)
}
conn.Close()
- Logf(0, "checking config...")
+ log.Logf(0, "checking config...")
config, execOpts, err := ipc.DefaultConfig()
if err != nil {
- Fatalf("failed to create ipc config: %v", err)
+ log.Fatalf("failed to create ipc config: %v", err)
}
if kcov, _ := checkCompsSupported(); !kcov && config.Flags&ipc.FlagSignal != 0 {
- Fatalf("coverage is not supported by kernel")
+ log.Fatalf("coverage is not supported by kernel")
}
if config.Flags&ipc.FlagSandboxNamespace != 0 && !osutil.IsExist("/proc/self/ns/user") {
- Fatalf("/proc/self/ns/user is not present for namespace sandbox")
+ log.Fatalf("/proc/self/ns/user is not present for namespace sandbox")
}
calls, _, err := host.DetectSupportedSyscalls(target, sandbox)
if err != nil {
- Fatalf("failed to detect supported syscalls: %v", err)
+ log.Fatalf("failed to detect supported syscalls: %v", err)
}
calls, _ = target.TransitivelyEnabledCalls(calls)
- Logf(0, "enabled syscalls: %v", len(calls))
+ log.Logf(0, "enabled syscalls: %v", len(calls))
if calls[target.SyscallMap["syz_emit_ethernet"]] ||
calls[target.SyscallMap["syz_extract_tcp_res"]] {
config.Flags |= ipc.FlagEnableTun
}
- Logf(0, "testing simple program...")
+ log.Logf(0, "testing simple program...")
env, err := ipc.MakeEnv(config, 0)
if err != nil {
- Fatalf("failed to create ipc env: %v", err)
+ log.Fatalf("failed to create ipc env: %v", err)
}
p := target.GenerateSimpleProg()
output, info, failed, hanged, err := env.Exec(execOpts, p)
if err != nil {
- Fatalf("execution failed: %v\n%s", err, output)
+ log.Fatalf("execution failed: %v\n%s", err, output)
}
if hanged {
- Fatalf("program hanged:\n%s", output)
+ log.Fatalf("program hanged:\n%s", output)
}
if failed {
- Fatalf("program failed:\n%s", output)
+ log.Fatalf("program failed:\n%s", output)
}
if len(info) == 0 {
- Fatalf("no calls executed:\n%s", output)
+ log.Fatalf("no calls executed:\n%s", output)
}
if info[0].Errno != 0 {
- Fatalf("simple call failed: %v\n%s", info[0].Errno, output)
+ log.Fatalf("simple call failed: %v\n%s", info[0].Errno, output)
}
if config.Flags&ipc.FlagSignal != 0 && len(info[0].Signal) == 0 {
- Fatalf("got no coverage:\n%s", output)
+ log.Fatalf("got no coverage:\n%s", output)
}
}
diff --git a/syz-hub/http.go b/syz-hub/http.go
index f7a01c91b..9887015e0 100644
--- a/syz-hub/http.go
+++ b/syz-hub/http.go
@@ -11,7 +11,7 @@ import (
"sort"
"strings"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
)
func (hub *Hub) initHTTP(addr string) {
@@ -19,12 +19,12 @@ func (hub *Hub) initHTTP(addr string) {
ln, err := net.Listen("tcp4", addr)
if err != nil {
- Fatalf("failed to listen on %v: %v", addr, err)
+ log.Fatalf("failed to listen on %v: %v", addr, err)
}
- Logf(0, "serving http on http://%v", ln.Addr())
+ log.Logf(0, "serving http on http://%v", ln.Addr())
go func() {
err := http.Serve(ln, nil)
- Fatalf("failed to serve http: %v", err)
+ log.Fatalf("failed to serve http: %v", err)
}()
}
@@ -33,7 +33,7 @@ func (hub *Hub) httpSummary(w http.ResponseWriter, r *http.Request) {
defer hub.mu.Unlock()
data := &UISummaryData{
- Log: CachedLogOutput(),
+ Log: log.CachedLogOutput(),
}
total := UIManager{
Name: "total",
@@ -59,7 +59,7 @@ func (hub *Hub) httpSummary(w http.ResponseWriter, r *http.Request) {
sort.Sort(UIManagerArray(data.Managers))
data.Managers = append([]UIManager{total}, data.Managers...)
if err := summaryTemplate.Execute(w, data); err != nil {
- Logf(0, "failed to execute template: %v", err)
+ log.Logf(0, "failed to execute template: %v", err)
http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
return
}
diff --git a/syz-hub/hub.go b/syz-hub/hub.go
index f8509f3a3..259265576 100644
--- a/syz-hub/hub.go
+++ b/syz-hub/hub.go
@@ -10,8 +10,8 @@ import (
"sync"
"github.com/google/syzkaller/pkg/config"
- . "github.com/google/syzkaller/pkg/log"
- . "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/rpctype"
"github.com/google/syzkaller/syz-hub/state"
)
@@ -39,13 +39,13 @@ func main() {
flag.Parse()
cfg := new(Config)
if err := config.LoadFile(*flagConfig, cfg); err != nil {
- Fatal(err)
+ log.Fatal(err)
}
- EnableLogCaching(1000, 1<<20)
+ log.EnableLogCaching(1000, 1<<20)
st, err := state.Make(cfg.Workdir)
if err != nil {
- Fatalf("failed to load state: %v", err)
+ log.Fatalf("failed to load state: %v", err)
}
hub := &Hub{
st: st,
@@ -57,15 +57,15 @@ func main() {
hub.initHTTP(cfg.HTTP)
- s, err := NewRPCServer(cfg.RPC, hub)
+ s, err := rpctype.NewRPCServer(cfg.RPC, hub)
if err != nil {
- Fatalf("failed to create rpc server: %v", err)
+ log.Fatalf("failed to create rpc server: %v", err)
}
- Logf(0, "serving rpc on tcp://%v", s.Addr())
+ log.Logf(0, "serving rpc on tcp://%v", s.Addr())
s.Serve()
}
-func (hub *Hub) Connect(a *HubConnectArgs, r *int) error {
+func (hub *Hub) Connect(a *rpctype.HubConnectArgs, r *int) error {
name, err := hub.auth(a.Client, a.Key, a.Manager)
if err != nil {
return err
@@ -73,16 +73,16 @@ func (hub *Hub) Connect(a *HubConnectArgs, r *int) error {
hub.mu.Lock()
defer hub.mu.Unlock()
- Logf(0, "connect from %v: fresh=%v calls=%v corpus=%v",
+ log.Logf(0, "connect from %v: fresh=%v calls=%v corpus=%v",
name, a.Fresh, len(a.Calls), len(a.Corpus))
if err := hub.st.Connect(name, a.Fresh, a.Calls, a.Corpus); err != nil {
- Logf(0, "connect error: %v", err)
+ log.Logf(0, "connect error: %v", err)
return err
}
return nil
}
-func (hub *Hub) Sync(a *HubSyncArgs, r *HubSyncRes) error {
+func (hub *Hub) Sync(a *rpctype.HubSyncArgs, r *rpctype.HubSyncRes) error {
name, err := hub.auth(a.Client, a.Key, a.Manager)
if err != nil {
return err
@@ -92,39 +92,39 @@ func (hub *Hub) Sync(a *HubSyncArgs, r *HubSyncRes) error {
progs, more, err := hub.st.Sync(name, a.Add, a.Del)
if err != nil {
- Logf(0, "sync error: %v", err)
+ log.Logf(0, "sync error: %v", err)
return err
}
r.Progs = progs
r.More = more
for _, repro := range a.Repros {
if err := hub.st.AddRepro(name, repro); err != nil {
- Logf(0, "add repro error: %v", err)
+ log.Logf(0, "add repro error: %v", err)
}
}
if a.NeedRepros {
repro, err := hub.st.PendingRepro(name)
if err != nil {
- Logf(0, "sync error: %v", err)
+ log.Logf(0, "sync error: %v", err)
}
if repro != nil {
r.Repros = [][]byte{repro}
}
}
- Logf(0, "sync from %v: recv: add=%v del=%v repros=%v; send: progs=%v repros=%v pending=%v",
+ log.Logf(0, "sync from %v: recv: add=%v del=%v repros=%v; send: progs=%v repros=%v pending=%v",
name, len(a.Add), len(a.Del), len(a.Repros), len(r.Progs), len(r.Repros), more)
return nil
}
func (hub *Hub) auth(client, key, manager string) (string, error) {
if expectedKey, ok := hub.keys[client]; !ok || key != expectedKey {
- Logf(0, "connect from unauthorized client %v", client)
+ log.Logf(0, "connect from unauthorized client %v", client)
return "", fmt.Errorf("unauthorized manager")
}
if manager == "" {
manager = client
} else if !strings.HasPrefix(manager, client) {
- Logf(0, "manager %v does not have client prefix %v", manager, client)
+ log.Logf(0, "manager %v does not have client prefix %v", manager, client)
return "", fmt.Errorf("unauthorized manager")
}
return manager, nil
diff --git a/syz-hub/state/state.go b/syz-hub/state/state.go
index d04377166..cbeaff9f0 100644
--- a/syz-hub/state/state.go
+++ b/syz-hub/state/state.go
@@ -14,7 +14,7 @@ import (
"github.com/google/syzkaller/pkg/db"
"github.com/google/syzkaller/pkg/hash"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
)
@@ -73,29 +73,29 @@ func Make(dir string) (*State, error) {
return nil, err
}
}
- Logf(0, "purging corpus...")
+ log.Logf(0, "purging corpus...")
st.purgeCorpus()
- Logf(0, "done, %v programs", len(st.Corpus.Records))
+ log.Logf(0, "done, %v programs", len(st.Corpus.Records))
return st, err
}
func loadDB(file, name string) (*db.DB, uint64) {
- Logf(0, "reading %v...", name)
+ log.Logf(0, "reading %v...", name)
db, err := db.Open(file)
if err != nil {
- Fatalf("failed to open %v database: %v", name, err)
+ log.Fatalf("failed to open %v database: %v", name, err)
}
- Logf(0, "read %v programs", len(db.Records))
+ log.Logf(0, "read %v programs", len(db.Records))
var maxSeq uint64
for key, rec := range db.Records {
if _, err := prog.CallSet(rec.Val); err != nil {
- Logf(0, "bad file: can't parse call set: %v", err)
+ log.Logf(0, "bad file: can't parse call set: %v", err)
db.Delete(key)
continue
}
if sig := hash.Hash(rec.Val); sig.String() != key {
- Logf(0, "bad file: hash %v, want hash %v", key, sig.String())
+ log.Logf(0, "bad file: hash %v, want hash %v", key, sig.String())
db.Delete(key)
continue
}
@@ -104,7 +104,7 @@ func loadDB(file, name string) (*db.DB, uint64) {
}
}
if err := db.Flush(); err != nil {
- Fatalf("failed to flush corpus database: %v", err)
+ log.Fatalf("failed to flush corpus database: %v", err)
}
return db, maxSeq
}
@@ -135,7 +135,7 @@ func (st *State) createManager(name string) (*Manager, error) {
if err != nil {
return nil, fmt.Errorf("failed to open manager corpus %v: %v", mgr.corpusFile, err)
}
- Logf(0, "created manager %v: corpus=%v, corpusSeq=%v, reproSeq=%v",
+ log.Logf(0, "created manager %v: corpus=%v, corpusSeq=%v, reproSeq=%v",
mgr.name, len(mgr.Corpus.Records), mgr.corpusSeq, mgr.reproSeq)
st.Managers[name] = mgr
return mgr, nil
@@ -167,7 +167,7 @@ func (st *State) Connect(name string, fresh bool, calls []string, corpus [][]byt
var err error
mgr.Corpus, err = db.Open(mgr.corpusFile)
if err != nil {
- Logf(0, "failed to open corpus database: %v", err)
+ log.Logf(0, "failed to open corpus database: %v", err)
return err
}
st.addInputs(mgr, corpus)
@@ -185,7 +185,7 @@ func (st *State) Sync(name string, add [][]byte, del []string) ([][]byte, int, e
mgr.Corpus.Delete(sig)
}
if err := mgr.Corpus.Flush(); err != nil {
- Logf(0, "failed to flush corpus database: %v", err)
+ log.Logf(0, "failed to flush corpus database: %v", err)
}
st.purgeCorpus()
}
@@ -203,7 +203,7 @@ func (st *State) AddRepro(name string, repro []byte) error {
return fmt.Errorf("unconnected manager %v", name)
}
if _, err := prog.CallSet(repro); err != nil {
- Logf(0, "manager %v: failed to extract call set: %v, program:\n%v",
+ log.Logf(0, "manager %v: failed to extract call set: %v, program:\n%v",
mgr.name, err, string(repro))
return nil
}
@@ -220,7 +220,7 @@ func (st *State) AddRepro(name string, repro []byte) error {
st.reproSeq++
st.Repros.Save(sig, repro, st.reproSeq)
if err := st.Repros.Flush(); err != nil {
- Logf(0, "failed to flush repro database: %v", err)
+ log.Logf(0, "failed to flush repro database: %v", err)
}
return nil
}
@@ -319,16 +319,16 @@ func (st *State) addInputs(mgr *Manager, inputs [][]byte) {
st.addInput(mgr, input)
}
if err := mgr.Corpus.Flush(); err != nil {
- Logf(0, "failed to flush corpus database: %v", err)
+ log.Logf(0, "failed to flush corpus database: %v", err)
}
if err := st.Corpus.Flush(); err != nil {
- Logf(0, "failed to flush corpus database: %v", err)
+ log.Logf(0, "failed to flush corpus database: %v", err)
}
}
func (st *State) addInput(mgr *Manager, input []byte) {
if _, err := prog.CallSet(input); err != nil {
- Logf(0, "manager %v: failed to extract call set: %v, program:\n%v", mgr.name, err, string(input))
+ log.Logf(0, "manager %v: failed to extract call set: %v, program:\n%v", mgr.name, err, string(input))
return
}
sig := hash.String(input)
@@ -352,7 +352,7 @@ func (st *State) purgeCorpus() {
st.Corpus.Delete(key)
}
if err := st.Corpus.Flush(); err != nil {
- Logf(0, "failed to flush corpus database: %v", err)
+ log.Logf(0, "failed to flush corpus database: %v", err)
}
}
@@ -367,7 +367,7 @@ func managerSupportsAllCalls(mgr, prog map[string]struct{}) bool {
func writeFile(name string, data []byte) {
if err := osutil.WriteFile(name, data); err != nil {
- Logf(0, "failed to write file %v: %v", name, err)
+ log.Logf(0, "failed to write file %v: %v", name, err)
}
}
diff --git a/syz-manager/cover.go b/syz-manager/cover.go
index dde9ef93a..d78d1c20e 100644
--- a/syz-manager/cover.go
+++ b/syz-manager/cover.go
@@ -18,7 +18,7 @@ import (
"github.com/google/syzkaller/pkg/cover"
"github.com/google/syzkaller/pkg/hash"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/symbolizer"
)
@@ -77,12 +77,12 @@ func initAllCover(os, arch, vmlinux string) {
sort.Sort(uint64Array(pcs))
allCoverPCs = pcs
} else {
- Logf(0, "failed to run objdump on %v: %v", vmlinux, err)
+ log.Logf(0, "failed to run objdump on %v: %v", vmlinux, err)
}
allSymbols, err = symbolizer.ReadSymbols(vmlinux)
if err != nil {
- Logf(0, "failed to run nm on %v: %v", vmlinux, err)
+ log.Logf(0, "failed to run nm on %v: %v", vmlinux, err)
}
}()
}
diff --git a/syz-manager/html.go b/syz-manager/html.go
index 126ec715a..c101bb135 100644
--- a/syz-manager/html.go
+++ b/syz-manager/html.go
@@ -21,7 +21,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/cover"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
)
@@ -42,19 +42,19 @@ func (mgr *Manager) initHTTP() {
ln, err := net.Listen("tcp4", mgr.cfg.HTTP)
if err != nil {
- Fatalf("failed to listen on %v: %v", mgr.cfg.HTTP, err)
+ log.Fatalf("failed to listen on %v: %v", mgr.cfg.HTTP, err)
}
- Logf(0, "serving http on http://%v", ln.Addr())
+ log.Logf(0, "serving http on http://%v", ln.Addr())
go func() {
err := http.Serve(ln, nil)
- Fatalf("failed to serve http: %v", err)
+ log.Fatalf("failed to serve http: %v", err)
}()
}
func (mgr *Manager) httpSummary(w http.ResponseWriter, r *http.Request) {
data := &UISummaryData{
Name: mgr.cfg.Name,
- Log: CachedLogOutput(),
+ Log: log.CachedLogOutput(),
Stats: mgr.collectStats(),
}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 0a8e27015..c185b4a79 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -23,11 +23,11 @@ import (
"github.com/google/syzkaller/pkg/db"
"github.com/google/syzkaller/pkg/gce"
"github.com/google/syzkaller/pkg/hash"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/pkg/repro"
- . "github.com/google/syzkaller/pkg/rpctype"
+ "github.com/google/syzkaller/pkg/rpctype"
"github.com/google/syzkaller/pkg/signal"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys"
@@ -68,9 +68,9 @@ type Manager struct {
enabledSyscalls []int
enabledCalls []string // as determined by fuzzer
- candidates []RPCCandidate // untriaged inputs from corpus and hub
+ candidates []rpctype.RPCCandidate // untriaged inputs from corpus and hub
disabledHashes map[string]struct{}
- corpus map[string]RPCInput
+ corpus map[string]rpctype.RPCInput
corpusCover cover.Cover
corpusSignal signal.Signal
maxSignal signal.Signal
@@ -78,7 +78,7 @@ type Manager struct {
newRepros [][]byte
fuzzers map[string]*Fuzzer
- hub *RPCClient
+ hub *rpctype.RPCClient
hubCorpus map[hash.Sig]bool
needMoreRepros chan chan bool
hubReproQueue chan *Crash
@@ -106,7 +106,7 @@ const currentDBVersion = 3
type Fuzzer struct {
name string
- inputs []RPCInput
+ inputs []rpctype.RPCInput
newMaxSignal signal.Signal
}
@@ -118,21 +118,21 @@ type Crash struct {
func main() {
if sys.GitRevision == "" {
- Fatalf("Bad syz-manager build. Build with make, run bin/syz-manager.")
+ log.Fatalf("Bad syz-manager build. Build with make, run bin/syz-manager.")
}
flag.Parse()
- EnableLogCaching(1000, 1<<20)
+ log.EnableLogCaching(1000, 1<<20)
cfg, err := mgrconfig.LoadFile(*flagConfig)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
syscalls, err := mgrconfig.ParseEnabledSyscalls(target, cfg.Enable_Syscalls, cfg.Disable_Syscalls)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
initAllCover(cfg.TargetOS, cfg.TargetVMArch, cfg.Vmlinux)
RunManager(cfg, target, syscalls)
@@ -148,7 +148,7 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
var err error
vmPool, err = vm.Create(cfg.Type, env)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
}
@@ -169,7 +169,7 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
stats: make(map[string]uint64),
crashTypes: make(map[string]bool),
enabledSyscalls: enabledSyscalls,
- corpus: make(map[string]RPCInput),
+ corpus: make(map[string]rpctype.RPCInput),
disabledHashes: make(map[string]struct{}),
fuzzers: make(map[string]*Fuzzer),
fresh: true,
@@ -180,11 +180,11 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
usedFiles: make(map[string]time.Time),
}
- Logf(0, "loading corpus...")
+ log.Logf(0, "loading corpus...")
var err error
mgr.corpusDB, err = db.Open(filepath.Join(cfg.Workdir, "corpus.db"))
if err != nil {
- Fatalf("failed to open corpus database: %v", err)
+ log.Fatalf("failed to open corpus database: %v", err)
}
// By default we don't re-minimize/re-smash programs from corpus,
// it takes lots of time on start and is unnecessary.
@@ -210,7 +210,7 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
p, err := mgr.target.Deserialize(rec.Val)
if err != nil {
if deleted < 10 {
- Logf(0, "deleting broken program: %v\n%s", err, rec.Val)
+ log.Logf(0, "deleting broken program: %v\n%s", err, rec.Val)
}
mgr.corpusDB.Delete(key)
deleted++
@@ -232,14 +232,14 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
mgr.disabledHashes[hash.String(rec.Val)] = struct{}{}
continue
}
- mgr.candidates = append(mgr.candidates, RPCCandidate{
+ mgr.candidates = append(mgr.candidates, rpctype.RPCCandidate{
Prog: rec.Val,
Minimized: minimized,
Smashed: smashed,
})
}
mgr.fresh = len(mgr.corpusDB.Records) == 0
- Logf(0, "loaded %v programs (%v total, %v deleted)",
+ log.Logf(0, "loaded %v programs (%v total, %v deleted)",
len(mgr.candidates), len(mgr.corpusDB.Records), deleted)
// Now this is ugly.
@@ -260,11 +260,11 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
mgr.collectUsedFiles()
// Create RPC server for fuzzers.
- s, err := NewRPCServer(cfg.RPC, mgr)
+ s, err := rpctype.NewRPCServer(cfg.RPC, mgr)
if err != nil {
- Fatalf("failed to create rpc server: %v", err)
+ log.Fatalf("failed to create rpc server: %v", err)
}
- Logf(0, "serving rpc on tcp://%v", s.Addr())
+ log.Logf(0, "serving rpc on tcp://%v", s.Addr())
mgr.port = s.Addr().(*net.TCPAddr).Port
go s.Serve()
@@ -291,7 +291,7 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
numReproducing := atomic.LoadUint32(&mgr.numReproducing)
numFuzzing := atomic.LoadUint32(&mgr.numFuzzing)
- Logf(0, "VMs %v, executed %v, cover %v, crashes %v, repro %v",
+ log.Logf(0, "VMs %v, executed %v, cover %v, crashes %v, repro %v",
numFuzzing, executed, signal, crashes, numReproducing)
}
}()
@@ -299,7 +299,7 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
if *flagBench != "" {
f, err := os.OpenFile(*flagBench, os.O_WRONLY|os.O_CREATE|os.O_EXCL, osutil.DefaultFilePerm)
if err != nil {
- Fatalf("failed to open bench file: %v", err)
+ log.Fatalf("failed to open bench file: %v", err)
}
go func() {
for {
@@ -323,10 +323,10 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
data, err := json.MarshalIndent(vals, "", " ")
if err != nil {
- Fatalf("failed to serialize bench data")
+ log.Fatalf("failed to serialize bench data")
}
if _, err := f.Write(append(data, '\n')); err != nil {
- Fatalf("failed to write bench data")
+ log.Fatalf("failed to write bench data")
}
}
}()
@@ -347,9 +347,9 @@ func RunManager(cfg *mgrconfig.Config, target *prog.Target, syscalls map[int]boo
osutil.HandleInterrupts(vm.Shutdown)
if mgr.vmPool == nil {
- Logf(0, "no VMs started (type=none)")
- Logf(0, "you are supposed to start syz-fuzzer manually as:")
- Logf(0, "syz-fuzzer -manager=manager.ip:%v [other flags as necessary]", mgr.port)
+ log.Logf(0, "no VMs started (type=none)")
+ log.Logf(0, "you are supposed to start syz-fuzzer manually as:")
+ log.Logf(0, "syz-fuzzer -manager=manager.ip:%v [other flags as necessary]", mgr.port)
<-vm.Shutdown
return
}
@@ -371,8 +371,8 @@ type ReproResult struct {
}
func (mgr *Manager) vmLoop() {
- Logf(0, "booting test machines...")
- Logf(0, "wait for the connection from test machine...")
+ log.Logf(0, "booting test machines...")
+ log.Logf(0, "wait for the connection from test machine...")
instancesPerRepro := 4
vmCount := mgr.vmPool.Count()
if instancesPerRepro > vmCount {
@@ -413,19 +413,19 @@ func (mgr *Manager) vmLoop() {
}
needRepro, err := mgr.dash.NeedRepro(cid)
if err != nil {
- Logf(0, "dashboard.NeedRepro failed: %v", err)
+ log.Logf(0, "dashboard.NeedRepro failed: %v", err)
}
if !needRepro {
continue
}
}
}
- Logf(1, "loop: add to repro queue '%v'", crash.Title)
+ log.Logf(1, "loop: add to repro queue '%v'", crash.Title)
reproducing[crash.Title] = true
reproQueue = append(reproQueue, crash)
}
- Logf(1, "loop: phase=%v shutdown=%v instances=%v/%v %+v repro: pending=%v reproducing=%v queued=%v",
+ log.Logf(1, "loop: phase=%v shutdown=%v instances=%v/%v %+v repro: pending=%v reproducing=%v queued=%v",
phase, shutdown == nil, len(instances), vmCount, instances,
len(pendingRepro), len(reproducing), len(reproQueue))
@@ -448,7 +448,7 @@ func (mgr *Manager) vmLoop() {
instances = instances[:len(instances)-instancesPerRepro]
reproInstances += instancesPerRepro
atomic.AddUint32(&mgr.numReproducing, 1)
- Logf(1, "loop: starting repro of '%v' on instances %+v", crash.Title, vmIndexes)
+ log.Logf(1, "loop: starting repro of '%v' on instances %+v", crash.Title, vmIndexes)
go func() {
res, err := repro.Run(crash.Output, mgr.cfg, mgr.getReporter(), mgr.vmPool, vmIndexes)
reproDone <- &ReproResult{vmIndexes, crash.Title, res, err, crash.hub}
@@ -458,7 +458,7 @@ func (mgr *Manager) vmLoop() {
last := len(instances) - 1
idx := instances[last]
instances = instances[:last]
- Logf(1, "loop: starting instance %v", idx)
+ log.Logf(1, "loop: starting instance %v", idx)
go func() {
crash, err := mgr.runInstance(idx)
runDone <- &RunResult{idx, crash, err}
@@ -473,12 +473,12 @@ func (mgr *Manager) vmLoop() {
select {
case stopRequest <- true:
- Logf(1, "loop: issued stop request")
+ log.Logf(1, "loop: issued stop request")
stopPending = true
case res := <-runDone:
- Logf(1, "loop: instance %v finished, crash=%v", res.idx, res.crash != nil)
+ log.Logf(1, "loop: instance %v finished, crash=%v", res.idx, res.crash != nil)
if res.err != nil && shutdown != nil {
- Logf(0, "%v", res.err)
+ log.Logf(0, "%v", res.err)
}
stopPending = false
instances = append(instances, res.idx)
@@ -487,7 +487,7 @@ func (mgr *Manager) vmLoop() {
if shutdown != nil && res.crash != nil && !mgr.isSuppressed(res.crash) {
needRepro := mgr.saveCrash(res.crash)
if needRepro {
- Logf(1, "loop: add pending repro for '%v'", res.crash.Title)
+ log.Logf(1, "loop: add pending repro for '%v'", res.crash.Title)
pendingRepro[res.crash] = true
}
}
@@ -499,10 +499,10 @@ func (mgr *Manager) vmLoop() {
crepro = res.res.CRepro
title = res.res.Report.Title
}
- Logf(1, "loop: repro on %+v finished '%v', repro=%v crepro=%v desc='%v'",
+ log.Logf(1, "loop: repro on %+v finished '%v', repro=%v crepro=%v desc='%v'",
res.instances, res.title0, res.res != nil, crepro, title)
if res.err != nil {
- Logf(0, "repro failed: %v", res.err)
+ log.Logf(0, "repro failed: %v", res.err)
}
delete(reproducing, res.title0)
instances = append(instances, res.instances...)
@@ -515,10 +515,10 @@ func (mgr *Manager) vmLoop() {
mgr.saveRepro(res.res, res.hub)
}
case <-shutdown:
- Logf(1, "loop: shutting down...")
+ log.Logf(1, "loop: shutting down...")
shutdown = nil
case crash := <-mgr.hubReproQueue:
- Logf(1, "loop: get repro from hub")
+ log.Logf(1, "loop: get repro from hub")
pendingRepro[crash] = true
case reply := <-mgr.needMoreRepros:
reply <- phase >= phaseTriagedHub &&
@@ -579,7 +579,7 @@ func (mgr *Manager) runInstance(index int) (*Crash, error) {
rep := vm.MonitorExecution(outc, errc, mgr.getReporter(), false)
if rep == nil {
// This is the only "OK" outcome.
- Logf(0, "vm-%v: running for %v, restarting", index, time.Since(start))
+ log.Logf(0, "vm-%v: running for %v, restarting", index, time.Since(start))
return nil, nil
}
crash := &Crash{
@@ -595,7 +595,7 @@ func (mgr *Manager) isSuppressed(crash *Crash) bool {
if !re.Match(crash.Output) {
continue
}
- Logf(0, "vm-%v: suppressing '%v' with '%v'", crash.vmIndex, crash.Title, re.String())
+ log.Logf(0, "vm-%v: suppressing '%v' with '%v'", crash.vmIndex, crash.Title, re.String())
mgr.mu.Lock()
mgr.stats["suppressed"]++
mgr.mu.Unlock()
@@ -610,12 +610,12 @@ func (mgr *Manager) emailCrash(crash *Crash) {
}
args := []string{"-s", "syzkaller: " + crash.Title}
args = append(args, mgr.cfg.Email_Addrs...)
- Logf(0, "sending email to %v", mgr.cfg.Email_Addrs)
+ log.Logf(0, "sending email to %v", mgr.cfg.Email_Addrs)
cmd := exec.Command("mailx", args...)
cmd.Stdin = bytes.NewReader(crash.Report.Report)
if _, err := osutil.Run(10*time.Minute, cmd); err != nil {
- Logf(0, "failed to send email: %v", err)
+ log.Logf(0, "failed to send email: %v", err)
}
}
@@ -624,9 +624,9 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
if crash.Corrupted {
corrupted = " [corrupted]"
}
- Logf(0, "vm-%v: crash: %v%v", crash.vmIndex, crash.Title, corrupted)
+ log.Logf(0, "vm-%v: crash: %v%v", crash.vmIndex, crash.Title, corrupted)
if err := mgr.getReporter().Symbolize(crash.Report); err != nil {
- Logf(0, "failed to symbolize report: %v", err)
+ log.Logf(0, "failed to symbolize report: %v", err)
}
mgr.mu.Lock()
@@ -648,7 +648,7 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
}
resp, err := mgr.dash.ReportCrash(dc)
if err != nil {
- Logf(0, "failed to report crash to dashboard: %v", err)
+ log.Logf(0, "failed to report crash to dashboard: %v", err)
} else {
// Don't store the crash locally, if we've successfully
// uploaded it to the dashboard. These will just eat disk space.
@@ -661,7 +661,7 @@ func (mgr *Manager) saveCrash(crash *Crash) bool {
dir := filepath.Join(mgr.crashdir, id)
osutil.MkdirAll(dir)
if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(crash.Title+"\n")); err != nil {
- Logf(0, "failed to write crash: %v", err)
+ log.Logf(0, "failed to write crash: %v", err)
}
// Save up to 100 reports. If we already have 100, overwrite the oldest one.
// Newer reports are generally more useful. Overwriting is also needed
@@ -719,7 +719,7 @@ func (mgr *Manager) saveFailedRepro(desc string) {
Title: desc,
}
if err := mgr.dash.ReportFailedRepro(cid); err != nil {
- Logf(0, "failed to report failed repro to dashboard: %v", err)
+ log.Logf(0, "failed to report failed repro to dashboard: %v", err)
}
}
dir := filepath.Join(mgr.crashdir, hash.String([]byte(desc)))
@@ -736,13 +736,13 @@ func (mgr *Manager) saveFailedRepro(desc string) {
func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
rep := res.Report
if err := mgr.getReporter().Symbolize(rep); err != nil {
- Logf(0, "failed to symbolize repro: %v", err)
+ log.Logf(0, "failed to symbolize repro: %v", err)
}
dir := filepath.Join(mgr.crashdir, hash.String([]byte(rep.Title)))
osutil.MkdirAll(dir)
if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(rep.Title+"\n")); err != nil {
- Logf(0, "failed to write crash: %v", err)
+ log.Logf(0, "failed to write crash: %v", err)
}
opts := fmt.Sprintf("# %+v\n", res.Opts)
prog := res.Prog.Serialize()
@@ -771,7 +771,7 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
osutil.WriteFile(filepath.Join(dir, "repro.cprog"), cprog)
cprogText = cprog
} else {
- Logf(0, "failed to write C source: %v", err)
+ log.Logf(0, "failed to write C source: %v", err)
}
}
@@ -801,7 +801,7 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
ReproC: cprogText,
}
if _, err := mgr.dash.ReportCrash(dc); err != nil {
- Logf(0, "failed to report repro to dashboard: %v", err)
+ log.Logf(0, "failed to report repro to dashboard: %v", err)
}
}
}
@@ -820,7 +820,7 @@ func (mgr *Manager) getReporter() report.Reporter {
mgr.reporter, err = report.NewReporter(mgr.cfg.TargetOS, kernelSrc, kernelObj,
allSymbols, mgr.cfg.ParsedIgnores)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
}
return mgr.reporter
@@ -835,12 +835,12 @@ func (mgr *Manager) minimizeCorpus() {
Context: inp,
})
}
- newCorpus := make(map[string]RPCInput)
+ newCorpus := make(map[string]rpctype.RPCInput)
for _, ctx := range signal.Minimize(inputs) {
- inp := ctx.(RPCInput)
+ inp := ctx.(rpctype.RPCInput)
newCorpus[hash.String(inp.Prog)] = inp
}
- Logf(1, "minimized corpus: %v -> %v", len(mgr.corpus), len(newCorpus))
+ log.Logf(1, "minimized corpus: %v -> %v", len(mgr.corpus), len(newCorpus))
mgr.corpus = newCorpus
}
@@ -857,14 +857,14 @@ func (mgr *Manager) minimizeCorpus() {
}
}
-func (mgr *Manager) Connect(a *ConnectArgs, r *ConnectRes) error {
- Logf(1, "fuzzer %v connected", a.Name)
+func (mgr *Manager) Connect(a *rpctype.ConnectArgs, r *rpctype.ConnectRes) error {
+ log.Logf(1, "fuzzer %v connected", a.Name)
mgr.mu.Lock()
defer mgr.mu.Unlock()
if mgr.firstConnect.IsZero() {
mgr.firstConnect = time.Now()
- Logf(0, "received first connection from test machine %v", a.Name)
+ log.Logf(0, "received first connection from test machine %v", a.Name)
}
mgr.stats["vm restarts"]++
@@ -915,34 +915,34 @@ func (mgr *Manager) Connect(a *ConnectArgs, r *ConnectRes) error {
return nil
}
-func (mgr *Manager) Check(a *CheckArgs, r *int) error {
+func (mgr *Manager) Check(a *rpctype.CheckArgs, r *int) error {
mgr.mu.Lock()
defer mgr.mu.Unlock()
if mgr.vmChecked {
return nil
}
- Logf(0, "machine check: %v calls enabled, kcov=%v, kleakcheck=%v, faultinjection=%v, comps=%v",
+ log.Logf(0, "machine check: %v calls enabled, kcov=%v, kleakcheck=%v, faultinjection=%v, comps=%v",
len(a.Calls), a.Kcov, a.Leak, a.Fault, a.CompsSupported)
if mgr.cfg.Cover && !a.Kcov {
- Fatalf("/sys/kernel/debug/kcov is missing on target machine. Enable CONFIG_KCOV and mount debugfs")
+ log.Fatalf("/sys/kernel/debug/kcov is missing on target machine. Enable CONFIG_KCOV and mount debugfs")
}
if mgr.cfg.Sandbox == "namespace" && !a.UserNamespaces {
- Fatalf("/proc/self/ns/user is missing on target machine or permission is denied. Can't use requested namespace sandbox. Enable CONFIG_USER_NS")
+ log.Fatalf("/proc/self/ns/user is missing on target machine or permission is denied. Can't use requested namespace sandbox. Enable CONFIG_USER_NS")
}
if mgr.vmPool != nil {
if mgr.target.Arch != a.ExecutorArch {
- Fatalf("mismatching target/executor arch: target=%v executor=%v",
+ log.Fatalf("mismatching target/executor arch: target=%v executor=%v",
mgr.target.Arch, a.ExecutorArch)
}
if sys.GitRevision != a.FuzzerGitRev || sys.GitRevision != a.ExecutorGitRev {
- Fatalf("syz-manager, syz-fuzzer and syz-executor binaries are built on different git revisions\n"+
+ log.Fatalf("syz-manager, syz-fuzzer and syz-executor binaries are built on different git revisions\n"+
"manager= %v\nfuzzer= %v\nexecutor=%v\n"+
"this is not supported, rebuild all binaries with make",
sys.GitRevision, a.FuzzerGitRev, a.ExecutorGitRev)
}
if mgr.target.Revision != a.FuzzerSyzRev || mgr.target.Revision != a.ExecutorSyzRev {
- Fatalf("syz-manager, syz-fuzzer and syz-executor binaries have different versions of system call descriptions compiled in\n"+
+ log.Fatalf("syz-manager, syz-fuzzer and syz-executor binaries have different versions of system call descriptions compiled in\n"+
"manager= %v\nfuzzer= %v\nexecutor=%v\n"+
"this is not supported, rebuild all binaries with make",
mgr.target.Revision, a.FuzzerSyzRev, a.ExecutorSyzRev)
@@ -956,33 +956,33 @@ func (mgr *Manager) Check(a *CheckArgs, r *int) error {
for _, id := range mgr.enabledSyscalls {
name := mgr.target.Syscalls[id].Name
if reason := disabled[name]; reason != "" {
- Logf(0, "disabling %v: %v", name, reason)
+ log.Logf(0, "disabling %v: %v", name, reason)
}
}
}
if len(a.Calls) == 0 {
- Fatalf("all system calls are disabled")
+ log.Fatalf("all system calls are disabled")
}
mgr.vmChecked = true
mgr.enabledCalls = a.Calls
return nil
}
-func (mgr *Manager) NewInput(a *NewInputArgs, r *int) error {
+func (mgr *Manager) NewInput(a *rpctype.NewInputArgs, r *int) error {
inputSignal := a.Signal.Deserialize()
- Logf(4, "new input from %v for syscall %v (signal=%v, cover=%v)",
+ log.Logf(4, "new input from %v for syscall %v (signal=%v, cover=%v)",
a.Name, a.Call, inputSignal.Len(), len(a.Cover))
mgr.mu.Lock()
defer mgr.mu.Unlock()
f := mgr.fuzzers[a.Name]
if f == nil {
- Fatalf("fuzzer %v is not connected", a.Name)
+ log.Fatalf("fuzzer %v is not connected", a.Name)
}
if _, err := mgr.target.Deserialize(a.RPCInput.Prog); err != nil {
// This should not happen, but we see such cases episodically, reason unknown.
- Logf(0, "failed to deserialize program from fuzzer: %v\n%s", err, a.RPCInput.Prog)
+ log.Logf(0, "failed to deserialize program from fuzzer: %v\n%s", err, a.RPCInput.Prog)
return nil
}
if mgr.corpusSignal.Diff(inputSignal).Empty() {
@@ -1005,7 +1005,7 @@ func (mgr *Manager) NewInput(a *NewInputArgs, r *int) error {
mgr.corpus[sig] = a.RPCInput
mgr.corpusDB.Save(sig, a.RPCInput.Prog, 0)
if err := mgr.corpusDB.Flush(); err != nil {
- Logf(0, "failed to save corpus database: %v", err)
+ log.Logf(0, "failed to save corpus database: %v", err)
}
for _, f1 := range mgr.fuzzers {
if f1 == f {
@@ -1019,7 +1019,7 @@ func (mgr *Manager) NewInput(a *NewInputArgs, r *int) error {
return nil
}
-func (mgr *Manager) Poll(a *PollArgs, r *PollRes) error {
+func (mgr *Manager) Poll(a *rpctype.PollArgs, r *rpctype.PollRes) error {
mgr.mu.Lock()
defer mgr.mu.Unlock()
@@ -1029,7 +1029,7 @@ func (mgr *Manager) Poll(a *PollArgs, r *PollRes) error {
f := mgr.fuzzers[a.Name]
if f == nil {
- Fatalf("fuzzer %v is not connected", a.Name)
+ log.Fatalf("fuzzer %v is not connected", a.Name)
}
newMaxSignal := mgr.maxSignal.Diff(a.MaxSignal.Deserialize())
if !newMaxSignal.Empty() {
@@ -1071,7 +1071,7 @@ func (mgr *Manager) Poll(a *PollArgs, r *PollRes) error {
}
}
}
- Logf(4, "poll from %v: candidates=%v inputs=%v", a.Name, len(r.Candidates), len(r.NewInputs))
+ log.Logf(4, "poll from %v: candidates=%v inputs=%v", a.Name, len(r.Candidates), len(r.NewInputs))
return nil
}
@@ -1095,7 +1095,7 @@ func (mgr *Manager) hubSync() {
mgr.minimizeCorpus()
if mgr.hub == nil {
- a := &HubConnectArgs{
+ a := &rpctype.HubConnectArgs{
Client: mgr.cfg.Hub_Client,
Key: mgr.cfg.Hub_Key,
Manager: mgr.cfg.Name,
@@ -1111,25 +1111,25 @@ func (mgr *Manager) hubSync() {
// Hub.Connect request can be very large, so do it on a transient connection
// (rpc connection buffers never shrink).
// Also don't do hub rpc's under the mutex -- hub can be slow or inaccessible.
- if err := RPCCall(mgr.cfg.Hub_Addr, "Hub.Connect", a, nil); err != nil {
+ if err := rpctype.RPCCall(mgr.cfg.Hub_Addr, "Hub.Connect", a, nil); err != nil {
mgr.mu.Lock()
- Logf(0, "Hub.Connect rpc failed: %v", err)
+ log.Logf(0, "Hub.Connect rpc failed: %v", err)
return
}
- conn, err := NewRPCClient(mgr.cfg.Hub_Addr)
+ conn, err := rpctype.NewRPCClient(mgr.cfg.Hub_Addr)
if err != nil {
mgr.mu.Lock()
- Logf(0, "failed to connect to hub at %v: %v", mgr.cfg.Hub_Addr, err)
+ log.Logf(0, "failed to connect to hub at %v: %v", mgr.cfg.Hub_Addr, err)
return
}
mgr.mu.Lock()
mgr.hub = conn
mgr.hubCorpus = hubCorpus
mgr.fresh = false
- Logf(0, "connected to hub at %v, corpus %v", mgr.cfg.Hub_Addr, len(mgr.corpus))
+ log.Logf(0, "connected to hub at %v, corpus %v", mgr.cfg.Hub_Addr, len(mgr.corpus))
}
- a := &HubSyncArgs{
+ a := &rpctype.HubSyncArgs{
Client: mgr.cfg.Hub_Client,
Key: mgr.cfg.Hub_Key,
Manager: mgr.cfg.Name,
@@ -1162,10 +1162,10 @@ func (mgr *Manager) hubSync() {
a.NeedRepros = <-needReproReply
}
- r := new(HubSyncRes)
+ r := new(rpctype.HubSyncRes)
if err := mgr.hub.Call("Hub.Sync", a, r); err != nil {
mgr.mu.Lock()
- Logf(0, "Hub.Sync rpc failed: %v", err)
+ log.Logf(0, "Hub.Sync rpc failed: %v", err)
mgr.hub.Close()
mgr.hub = nil
return
@@ -1197,7 +1197,7 @@ func (mgr *Manager) hubSync() {
dropped++
continue
}
- mgr.candidates = append(mgr.candidates, RPCCandidate{
+ mgr.candidates = append(mgr.candidates, rpctype.RPCCandidate{
Prog: inp,
Minimized: false, // don't trust programs from hub
Smashed: false,
@@ -1209,7 +1209,7 @@ func (mgr *Manager) hubSync() {
mgr.stats["hub new"] += uint64(len(r.Progs) - dropped)
mgr.stats["hub sent repros"] += uint64(len(a.Repros))
mgr.stats["hub recv repros"] += uint64(len(r.Repros) - reproDropped)
- Logf(0, "hub sync: send: add %v, del %v, repros %v; recv: progs: drop %v, new %v, repros: drop: %v, new %v; more %v",
+ log.Logf(0, "hub sync: send: add %v, del %v, repros %v; recv: progs: drop %v, new %v, repros: drop: %v, new %v; more %v",
len(a.Add), len(a.Del), len(a.Repros), dropped, len(r.Progs)-dropped, reproDropped, len(r.Repros)-reproDropped, r.More)
if len(r.Progs)+r.More == 0 {
break
@@ -1229,7 +1229,7 @@ func (mgr *Manager) collectUsedFiles() {
}
stat, err := os.Stat(f)
if err != nil {
- Fatalf("failed to stat %v: %v", f, err)
+ log.Fatalf("failed to stat %v: %v", f, err)
}
mgr.usedFiles[f] = stat.ModTime()
}
@@ -1248,10 +1248,10 @@ func (mgr *Manager) checkUsedFiles() {
for f, mod := range mgr.usedFiles {
stat, err := os.Stat(f)
if err != nil {
- Fatalf("failed to stat %v: %v", f, err)
+ log.Fatalf("failed to stat %v: %v", f, err)
}
if mod != stat.ModTime() {
- Fatalf("file %v that syz-manager uses has been modified by an external program\n"+
+ log.Fatalf("file %v that syz-manager uses has been modified by an external program\n"+
"this can lead to arbitrary syz-manager misbehavior\n"+
"modification time has changed: %v -> %v\n"+
"don't modify files that syz-manager uses. exiting to prevent harm",
@@ -1286,7 +1286,7 @@ func (mgr *Manager) dashboardReporter() {
mgr.mu.Unlock()
if err := mgr.dash.UploadManagerStats(req); err != nil {
- Logf(0, "faield to upload dashboard stats: %v", err)
+ log.Logf(0, "failed to upload dashboard stats: %v", err)
continue
}
mgr.mu.Lock()
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index b59a55682..e64172d9f 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -17,7 +17,7 @@ import (
"github.com/google/syzkaller/pkg/cover"
"github.com/google/syzkaller/pkg/ipc"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
@@ -45,25 +45,25 @@ func main() {
target, err := prog.GetTarget(*flagOS, *flagArch)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
var entries []*prog.LogEntry
for _, fn := range flag.Args() {
data, err := ioutil.ReadFile(fn)
if err != nil {
- Fatalf("failed to read log file: %v", err)
+ log.Fatalf("failed to read log file: %v", err)
}
entries = append(entries, target.ParseLog(data)...)
}
- Logf(0, "parsed %v programs", len(entries))
+ log.Logf(0, "parsed %v programs", len(entries))
if len(entries) == 0 {
return
}
config, execOpts, err := ipc.DefaultConfig()
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
if config.Flags&ipc.FlagSignal != 0 {
execOpts.Flags |= ipc.FlagCollectCover
@@ -110,7 +110,7 @@ func main() {
defer wg.Done()
env, err := ipc.MakeEnv(config, pid)
if err != nil {
- Fatalf("failed to create ipc env: %v", err)
+ log.Fatalf("failed to create ipc env: %v", err)
}
defer env.Close()
for {
@@ -123,7 +123,7 @@ func main() {
idx := pos
pos++
if idx%len(entries) == 0 && time.Since(lastPrint) > 5*time.Second {
- Logf(0, "executed programs: %v", idx)
+ log.Logf(0, "executed programs: %v", idx)
lastPrint = time.Now()
}
posMu.Unlock()
@@ -147,7 +147,7 @@ func main() {
}
data := entry.P.Serialize()
logMu.Lock()
- Logf(0, "executing program %v%v:\n%s", pid, strOpts, data)
+ log.Logf(0, "executing program %v%v:\n%s", pid, strOpts, data)
logMu.Unlock()
}
output, info, failed, hanged, err := env.Exec(callOpts, entry.P)
@@ -157,21 +157,21 @@ func main() {
default:
}
if failed {
- Logf(0, "BUG: executor-detected bug:\n%s", output)
+ log.Logf(0, "BUG: executor-detected bug:\n%s", output)
}
if config.Flags&ipc.FlagDebug != 0 || err != nil {
- Logf(0, "result: failed=%v hanged=%v err=%v\n\n%s",
+ log.Logf(0, "result: failed=%v hanged=%v err=%v\n\n%s",
failed, hanged, err, output)
}
if len(info) != 0 {
- Logf(1, "RESULT: signal %v, coverage %v errno %v",
+ log.Logf(1, "RESULT: signal %v, coverage %v errno %v",
len(info[0].Signal), len(info[0].Cover), info[0].Errno)
} else {
- Logf(1, "RESULT: no calls executed")
+ log.Logf(1, "RESULT: no calls executed")
}
if *flagCoverFile != "" {
for i, inf := range info {
- Logf(0, "call #%v: signal %v, coverage %v",
+ log.Logf(0, "call #%v: signal %v, coverage %v",
i, len(inf.Signal), len(inf.Cover))
if len(inf.Cover) == 0 {
continue
@@ -182,7 +182,7 @@ func main() {
}
err := osutil.WriteFile(fmt.Sprintf("%v.%v", *flagCoverFile, i), buf.Bytes())
if err != nil {
- Fatalf("failed to write coverage file: %v", err)
+ log.Fatalf("failed to write coverage file: %v", err)
}
}
}
@@ -206,11 +206,11 @@ func main() {
entry.P.MutateWithHints(i, comps, func(p *prog.Prog) {
ncandidates++
if *flagOutput == "stdout" {
- Logf(1, "PROGRAM:\n%s", p.Serialize())
+ log.Logf(1, "PROGRAM:\n%s", p.Serialize())
}
})
}
- Logf(0, "ncomps=%v ncandidates=%v", ncomps, ncandidates)
+ log.Logf(0, "ncomps=%v ncandidates=%v", ncomps, ncandidates)
}
return true
}() {
diff --git a/tools/syz-stress/stress.go b/tools/syz-stress/stress.go
index 2736c2f6f..504e5b58b 100644
--- a/tools/syz-stress/stress.go
+++ b/tools/syz-stress/stress.go
@@ -16,7 +16,7 @@ import (
"github.com/google/syzkaller/pkg/db"
"github.com/google/syzkaller/pkg/host"
"github.com/google/syzkaller/pkg/ipc"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
)
@@ -40,12 +40,12 @@ func main() {
flag.Parse()
target, err := prog.GetTarget(*flagOS, *flagArch)
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
corpus := readCorpus(target)
- Logf(0, "parsed %v programs", len(corpus))
+ log.Logf(0, "parsed %v programs", len(corpus))
if !*flagGenerate && len(corpus) == 0 {
- Fatalf("nothing to mutate (-generate=false and no corpus)")
+ log.Fatalf("nothing to mutate (-generate=false and no corpus)")
}
calls := buildCallList(target)
@@ -54,7 +54,7 @@ func main() {
config, execOpts, err := ipc.DefaultConfig()
if err != nil {
- Fatalf("%v", err)
+ log.Fatalf("%v", err)
}
gate = ipc.NewGate(2**flagProcs, nil)
for pid := 0; pid < *flagProcs; pid++ {
@@ -62,7 +62,7 @@ func main() {
go func() {
env, err := ipc.MakeEnv(config, pid)
if err != nil {
- Fatalf("failed to create execution environment: %v", err)
+ log.Fatalf("failed to create execution environment: %v", err)
}
rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
rnd := rand.New(rs)
@@ -84,7 +84,7 @@ func main() {
}()
}
for range time.NewTicker(5 * time.Second).C {
- Logf(0, "executed %v programs", atomic.LoadUint64(&statExec))
+ log.Logf(0, "executed %v programs", atomic.LoadUint64(&statExec))
}
}
@@ -117,13 +117,13 @@ func readCorpus(target *prog.Target) []*prog.Prog {
}
db, err := db.Open(*flagCorpus)
if err != nil {
- Fatalf("failed to open corpus database: %v", err)
+ log.Fatalf("failed to open corpus database: %v", err)
}
var progs []*prog.Prog
for _, rec := range db.Records {
p, err := target.Deserialize(rec.Val)
if err != nil {
- Fatalf("failed to deserialize corpus program: %v", err)
+ log.Fatalf("failed to deserialize corpus program: %v", err)
}
progs = append(progs, p)
}
@@ -141,18 +141,18 @@ func buildCallList(target *prog.Target) map[*prog.Syscall]bool {
}
calls, disabled, err := host.DetectSupportedSyscalls(target, "none")
if err != nil {
- Logf(0, "failed to detect host supported syscalls: %v", err)
+ log.Logf(0, "failed to detect host supported syscalls: %v", err)
calls = make(map[*prog.Syscall]bool)
for _, c := range target.Syscalls {
calls[c] = true
}
}
for c, reason := range disabled {
- Logf(0, "unsupported syscall: %v: %v", c.Name, reason)
+ log.Logf(0, "unsupported syscall: %v: %v", c.Name, reason)
}
calls, disabled = target.TransitivelyEnabledCalls(calls)
for c, reason := range disabled {
- Logf(0, "transitively unsupported: %v: %v", c.Name, reason)
+ log.Logf(0, "transitively unsupported: %v: %v", c.Name, reason)
}
return calls
}
diff --git a/vm/adb/adb.go b/vm/adb/adb.go
index 853f541e1..d2fc73c76 100644
--- a/vm/adb/adb.go
+++ b/vm/adb/adb.go
@@ -18,7 +18,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/config"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/vm/vmimpl"
)
@@ -138,16 +138,16 @@ func findConsole(adb, dev string) string {
}
con, err := findConsoleImpl(adb, dev)
if err != nil {
- Logf(0, "failed to associate adb device %v with console: %v", dev, err)
- Logf(0, "falling back to 'adb shell dmesg -w'")
- Logf(0, "note: some bugs may be detected as 'lost connection to test machine' with no kernel output")
+ log.Logf(0, "failed to associate adb device %v with console: %v", dev, err)
+ log.Logf(0, "falling back to 'adb shell dmesg -w'")
+ log.Logf(0, "note: some bugs may be detected as 'lost connection to test machine' with no kernel output")
con = "adb"
devToConsole[dev] = con
return con
}
devToConsole[dev] = con
consoleToDev[con] = dev
- Logf(0, "associating adb device %v with console %v", dev, con)
+ log.Logf(0, "associating adb device %v with console %v", dev, con)
return con
}
@@ -231,7 +231,7 @@ func (inst *instance) Forward(port int) (string, error) {
func (inst *instance) adb(args ...string) ([]byte, error) {
if inst.debug {
- Logf(0, "executing adb %+v", args)
+ log.Logf(0, "executing adb %+v", args)
}
rpipe, wpipe, err := os.Pipe()
if err != nil {
@@ -251,7 +251,7 @@ func (inst *instance) adb(args ...string) ([]byte, error) {
select {
case <-time.After(time.Minute):
if inst.debug {
- Logf(0, "adb hanged")
+ log.Logf(0, "adb hanged")
}
cmd.Process.Kill()
case <-done:
@@ -261,13 +261,13 @@ func (inst *instance) adb(args ...string) ([]byte, error) {
close(done)
out, _ := ioutil.ReadAll(rpipe)
if inst.debug {
- Logf(0, "adb failed: %v\n%s", err, out)
+ log.Logf(0, "adb failed: %v\n%s", err, out)
}
return nil, fmt.Errorf("adb %+v failed: %v\n%s", args, err, out)
}
close(done)
if inst.debug {
- Logf(0, "adb returned")
+ log.Logf(0, "adb returned")
}
out, _ := ioutil.ReadAll(rpipe)
return out, nil
@@ -320,11 +320,11 @@ func (inst *instance) checkBatteryLevel() error {
return err
}
if val >= minLevel {
- Logf(0, "device %v: battery level %v%%, OK", inst.device, val)
+ log.Logf(0, "device %v: battery level %v%%, OK", inst.device, val)
return nil
}
for {
- Logf(0, "device %v: battery level %v%%, waiting for %v%%", inst.device, val, requiredLevel)
+ log.Logf(0, "device %v: battery level %v%%, waiting for %v%%", inst.device, val, requiredLevel)
if !vmimpl.SleepInterruptible(time.Minute) {
return nil
}
@@ -400,7 +400,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
return nil, nil, err
}
if inst.debug {
- Logf(0, "starting: adb shell %v", command)
+ log.Logf(0, "starting: adb shell %v", command)
}
adb := osutil.Command(inst.adbBin, "-s", inst.device, "shell", "cd /data; "+command)
adb.Stdout = adbWpipe
@@ -437,7 +437,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
signal(vmimpl.ErrTimeout)
case <-inst.closed:
if inst.debug {
- Logf(0, "instance closed")
+ log.Logf(0, "instance closed")
}
signal(fmt.Errorf("instance closed"))
case err := <-merger.Err:
diff --git a/vm/gce/gce.go b/vm/gce/gce.go
index 782712c4a..7f3281382 100644
--- a/vm/gce/gce.go
+++ b/vm/gce/gce.go
@@ -26,7 +26,7 @@ import (
"github.com/google/syzkaller/pkg/gce"
"github.com/google/syzkaller/pkg/gcs"
"github.com/google/syzkaller/pkg/kd"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/vm/vmimpl"
)
@@ -94,17 +94,17 @@ func ctor(env *vmimpl.Env) (vmimpl.Pool, error) {
if err != nil {
return nil, fmt.Errorf("failed to init gce: %v", err)
}
- Logf(0, "GCE initialized: running on %v, internal IP %v, project %v, zone %v, net %v/%v",
+ log.Logf(0, "GCE initialized: running on %v, internal IP %v, project %v, zone %v, net %v/%v",
GCE.Instance, GCE.InternalIP, GCE.ProjectID, GCE.ZoneID, GCE.Network, GCE.Subnetwork)
if cfg.GCE_Image == "" {
cfg.GCE_Image = env.Name
gcsImage := filepath.Join(cfg.GCS_Path, env.Name+"-image.tar.gz")
- Logf(0, "uploading image to %v...", gcsImage)
+ log.Logf(0, "uploading image to %v...", gcsImage)
if err := uploadImageToGCS(env.Image, gcsImage); err != nil {
return nil, err
}
- Logf(0, "creating GCE image %v...", cfg.GCE_Image)
+ log.Logf(0, "creating GCE image %v...", cfg.GCE_Image)
if err := GCE.DeleteImage(cfg.GCE_Image); err != nil {
return nil, fmt.Errorf("failed to delete GCE image: %v", err)
}
@@ -137,11 +137,11 @@ func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
return nil, fmt.Errorf("failed to read file: %v", err)
}
- Logf(0, "deleting instance: %v", name)
+ log.Logf(0, "deleting instance: %v", name)
if err := pool.GCE.DeleteInstance(name, true); err != nil {
return nil, err
}
- Logf(0, "creating instance: %v", name)
+ log.Logf(0, "creating instance: %v", name)
ip, err := pool.GCE.CreateInstance(name, pool.cfg.Machine_Type, pool.cfg.GCE_Image, string(gceKeyPub))
if err != nil {
return nil, err
@@ -160,7 +160,7 @@ func (pool *Pool) Create(workdir string, index int) (vmimpl.Instance, error) {
sshKey = gceKey
sshUser = "syzkaller"
}
- Logf(0, "wait instance to boot: %v (%v)", name, ip)
+ log.Logf(0, "wait instance to boot: %v (%v)", name, ip)
if err := pool.waitInstanceBoot(name, ip, sshKey, sshUser, gceKey); err != nil {
return nil, err
}
@@ -327,13 +327,13 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
} else if merr, ok := err.(vmimpl.MergerError); ok && merr.R == conRpipe {
// Console connection must never fail. If it does, it's either
// instance preemption or a GCE bug. In either case, not a kernel bug.
- Logf(1, "%v: gce console connection failed with %v", inst.name, merr.Err)
+ log.Logf(1, "%v: gce console connection failed with %v", inst.name, merr.Err)
err = vmimpl.ErrTimeout
} else {
// Check if the instance was terminated due to preemption or host maintenance.
time.Sleep(5 * time.Second) // just to avoid any GCE races
if !inst.GCE.IsInstanceRunning(inst.name) {
- Logf(1, "%v: ssh exited but instance is not running", inst.name)
+ log.Logf(1, "%v: ssh exited but instance is not running", inst.name)
err = vmimpl.ErrTimeout
}
}
@@ -475,11 +475,11 @@ func uploadImageToGCS(localImage, gcsImage string) error {
func runCmd(debug bool, bin string, args ...string) ([]byte, error) {
if debug {
- Logf(0, "running command: %v %#v", bin, args)
+ log.Logf(0, "running command: %v %#v", bin, args)
}
output, err := osutil.RunCmd(time.Minute, "", bin, args...)
if debug {
- Logf(0, "result: %v\n%s", err, output)
+ log.Logf(0, "result: %v\n%s", err, output)
}
return output, err
}
diff --git a/vm/isolated/isolated.go b/vm/isolated/isolated.go
index b746c91b5..449228192 100644
--- a/vm/isolated/isolated.go
+++ b/vm/isolated/isolated.go
@@ -14,7 +14,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/config"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/vm/vmimpl"
)
@@ -117,7 +117,7 @@ func (inst *instance) Forward(port int) (string, error) {
func (inst *instance) ssh(command string) ([]byte, error) {
if inst.debug {
- Logf(0, "executing ssh %+v", command)
+ log.Logf(0, "executing ssh %+v", command)
}
rpipe, wpipe, err := osutil.LongPipe()
@@ -127,7 +127,7 @@ func (inst *instance) ssh(command string) ([]byte, error) {
args := append(inst.sshArgs("-p"), inst.target, command)
if inst.debug {
- Logf(0, "running command: ssh %#v", args)
+ log.Logf(0, "running command: ssh %#v", args)
}
cmd := osutil.Command("ssh", args...)
cmd.Stdout = wpipe
@@ -143,7 +143,7 @@ func (inst *instance) ssh(command string) ([]byte, error) {
select {
case <-time.After(time.Second * 30):
if inst.debug {
- Logf(0, "ssh hanged")
+ log.Logf(0, "ssh hanged")
}
cmd.Process.Kill()
case <-done:
@@ -153,39 +153,39 @@ func (inst *instance) ssh(command string) ([]byte, error) {
close(done)
out, _ := ioutil.ReadAll(rpipe)
if inst.debug {
- Logf(0, "ssh failed: %v\n%s", err, out)
+ log.Logf(0, "ssh failed: %v\n%s", err, out)
}
return nil, fmt.Errorf("ssh %+v failed: %v\n%s", args, err, out)
}
close(done)
if inst.debug {
- Logf(0, "ssh returned")
+ log.Logf(0, "ssh returned")
}
out, _ := ioutil.ReadAll(rpipe)
return out, nil
}
func (inst *instance) repair() error {
- Logf(2, "isolated: trying to ssh")
+ log.Logf(2, "isolated: trying to ssh")
if err := inst.waitForSSH(30 * 60); err == nil {
if inst.cfg.Target_Reboot {
- Logf(2, "isolated: trying to reboot")
+ log.Logf(2, "isolated: trying to reboot")
inst.ssh("reboot") // reboot will return an error, ignore it
if err := inst.waitForReboot(5 * 60); err != nil {
- Logf(2, "isolated: machine did not reboot")
+ log.Logf(2, "isolated: machine did not reboot")
return err
}
- Logf(2, "isolated: rebooted wait for comeback")
+ log.Logf(2, "isolated: rebooted wait for comeback")
if err := inst.waitForSSH(30 * 60); err != nil {
- Logf(2, "isolated: machine did not comeback")
+ log.Logf(2, "isolated: machine did not comeback")
return err
}
- Logf(2, "isolated: reboot succeeded")
+ log.Logf(2, "isolated: reboot succeeded")
} else {
- Logf(2, "isolated: ssh succeeded")
+ log.Logf(2, "isolated: ssh succeeded")
}
} else {
- Logf(2, "isolated: ssh failed")
+ log.Logf(2, "isolated: ssh failed")
return fmt.Errorf("SSH failed")
}
@@ -238,7 +238,7 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
args := append(inst.sshArgs("-P"), hostSrc, inst.target+":"+vmDst)
cmd := osutil.Command("scp", args...)
if inst.debug {
- Logf(0, "running command: scp %#v", args)
+ log.Logf(0, "running command: scp %#v", args)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
}
@@ -281,9 +281,9 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
args = append(args, "-R", proxy)
}
args = append(args, inst.target, "cd "+inst.cfg.Target_Dir+" && exec "+command)
- Logf(0, "running command: ssh %#v", args)
+ log.Logf(0, "running command: ssh %#v", args)
if inst.debug {
- Logf(0, "running command: ssh %#v", args)
+ log.Logf(0, "running command: ssh %#v", args)
}
cmd := osutil.Command("ssh", args...)
cmd.Stdout = wpipe
@@ -320,7 +320,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
signal(vmimpl.ErrTimeout)
case <-inst.closed:
if inst.debug {
- Logf(0, "instance closed")
+ log.Logf(0, "instance closed")
}
signal(fmt.Errorf("instance closed"))
case err := <-merger.Err:
diff --git a/vm/qemu/qemu.go b/vm/qemu/qemu.go
index b44f4198c..f82c88e1f 100644
--- a/vm/qemu/qemu.go
+++ b/vm/qemu/qemu.go
@@ -16,7 +16,7 @@ import (
"time"
"github.com/google/syzkaller/pkg/config"
- . "github.com/google/syzkaller/pkg/log"
+ "github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/vm/vmimpl"
)
@@ -306,7 +306,7 @@ func (inst *instance) Boot() error {
)
}
if inst.debug {
- Logf(0, "running command: %v %#v", inst.cfg.Qemu, args)
+ log.Logf(0, "running command: %v %#v", inst.cfg.Qemu, args)
}
qemu := osutil.Command(inst.cfg.Qemu, args...)
qemu.Stdout = inst.wpipe
@@ -396,7 +396,7 @@ func (inst *instance) Copy(hostSrc string) (string, error) {
args := append(inst.sshArgs("-P"), hostSrc, inst.sshuser+"@localhost:"+vmDst)
cmd := osutil.Command("scp", args...)
if inst.debug {
- Logf(0, "running command: scp %#v", args)
+ log.Logf(0, "running command: scp %#v", args)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
}
@@ -428,7 +428,7 @@ func (inst *instance) Run(timeout time.Duration, stop <-chan bool, command strin
args := append(inst.sshArgs("-p"), inst.sshuser+"@localhost", command)
if inst.debug {
- Logf(0, "running command: ssh %#v", args)
+ log.Logf(0, "running command: ssh %#v", args)
}
cmd := osutil.Command("ssh", args...)
cmd.Stdout = wpipe