aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-02-17 22:09:35 +0100
committerDmitry Vyukov <dvyukov@google.com>2017-02-17 22:22:01 +0100
commit19d8bc6235424c4b1734ded2f3cf723639bc2608 (patch)
tree9f3f32ce310132cdcec13440b9c515af070e4f84
parentd8f047b7fb21efcf610167cd6c72f0942a4f852d (diff)
syz-dash: first version of dashboard app
syz-dash is an appengine app that aggregates crashes from multiple managers. Very early version, still fleshing out required functionality.
-rw-r--r--syz-dash/api.go444
-rw-r--r--syz-dash/app.yaml16
-rw-r--r--syz-dash/bug.html134
-rw-r--r--syz-dash/dash.html34
-rw-r--r--syz-dash/handler.go542
-rw-r--r--syz-dash/patch.go70
-rw-r--r--syz-dash/patch_test.go288
-rw-r--r--syz-dash/static/style.css15
-rw-r--r--syz-gce/generated.go1
-rw-r--r--syz-gce/syz-gce.go513
-rw-r--r--tools/syz-dashtool/dashtool.go121
11 files changed, 1976 insertions, 202 deletions
diff --git a/syz-dash/api.go b/syz-dash/api.go
new file mode 100644
index 000000000..a1a049979
--- /dev/null
+++ b/syz-dash/api.go
@@ -0,0 +1,444 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build appengine
+
+package dash
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "appengine"
+ ds "appengine/datastore"
+ "github.com/google/syzkaller/dashboard"
+)
+
+func init() {
+ http.Handle("/api", handlerWrapper(handleAPI))
+}
+
+var apiHandlers = map[string]func(c appengine.Context, r *http.Request) (interface{}, error){
+ "poll_patches": handlePollPatches,
+ "get_patches": handleGetPatches,
+ "add_crash": handleAddCrash,
+ "add_repro": handleAddRepro,
+}
+
+func handleAPI(c appengine.Context, w http.ResponseWriter, r *http.Request) error {
+ client := new(Client)
+ if err := ds.Get(c, ds.NewKey(c, "Client", r.FormValue("client"), 0, nil), client); err != nil {
+ return fmt.Errorf("unknown client")
+ }
+ if r.FormValue("key") != client.Key {
+ return fmt.Errorf("unknown client")
+ }
+ method := r.FormValue("method")
+ handler := apiHandlers[method]
+ if handler == nil {
+ return fmt.Errorf("unknown api method '%v'", method)
+ }
+ res, err := handler(c, r)
+ if err != nil {
+ return err
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ w.Header().Set("Content-Encoding", "gzip")
+ gz := gzip.NewWriter(w)
+ json.NewEncoder(gz).Encode(res)
+ gz.Close()
+ } else {
+ json.NewEncoder(w).Encode(res)
+ }
+ return nil
+}
+
+const (
+ BugStatusNew = iota
+ BugStatusReported
+ BugStatusFixed
+ BugStatusUnclear
+ BugStatusClosed
+ BugStatusDeleted
+)
+
+func statusToString(status int) string {
+ switch status {
+ case BugStatusNew:
+ return "new"
+ case BugStatusReported:
+ return "reported"
+ case BugStatusFixed:
+ return "fixed"
+ case BugStatusUnclear:
+ return "unclear"
+ case BugStatusClosed:
+ return "closed"
+ case BugStatusDeleted:
+ return "deleted"
+ default:
+ panic(fmt.Sprintf("unknown status %v", status))
+ }
+}
+
+func stringToStatus(status string) (int, error) {
+ switch status {
+ case "new":
+ return BugStatusNew, nil
+ case "reported":
+ return BugStatusReported, nil
+ case "fixed":
+ return BugStatusFixed, nil
+ case "unclear":
+ return BugStatusUnclear, nil
+ case "closed":
+ return BugStatusClosed, nil
+ case "deleted":
+ return BugStatusDeleted, nil
+ default:
+ return 0, fmt.Errorf("unknown status '%v'", status)
+ }
+}
+
+type Client struct {
+ Name string
+ Key string
+}
+
+type Bug struct {
+ Version int64
+ Title string
+ Status int
+ Groups []string
+ ReportLink string
+ Comment string
+ CVE string
+ Patches []Patch
+}
+
+type Patch struct {
+ Title string
+ Time time.Time
+ Diff int64
+}
+
+type Group struct {
+ Title string
+ Seq int64
+ Bug int64
+ NumCrashes int64
+ NumRepro int64
+ HasRepro bool
+ HasCRepro bool
+ FirstTime time.Time
+ LastTime time.Time
+ Managers []string
+}
+
+func hash(s string) string {
+ sig := sha1.Sum([]byte(s))
+ return hex.EncodeToString(sig[:])
+}
+
+func (group *Group) DisplayTitle() string {
+ t := group.Title
+ if group.Seq != 0 {
+ t += fmt.Sprintf(" (%v)", group.Seq)
+ }
+ return t
+}
+
+func (group *Group) Key(c appengine.Context) *ds.Key {
+ return ds.NewKey(c, "Group", group.hash(), 0, nil)
+}
+
+func (group *Group) hash() string {
+ return hash(fmt.Sprintf("%v-%v", group.Title, group.Seq))
+}
+
+type Crash struct {
+ Manager string
+ Tag string
+ Time time.Time
+ Log int64
+ Report int64
+}
+
+type Repro struct {
+ Crash
+ Opts string
+ Prog int64
+ CProg int64
+}
+
+const (
+ maxTextLen = 100
+ maxTitleLen = 200
+ maxLinkLen = 1000
+ maxOptsLen = 1000
+ maxCommentLen = 4000
+)
+
+func handleAddCrash(c appengine.Context, r *http.Request) (interface{}, error) {
+ req := new(dashboard.Crash)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal crash: %v", err)
+ }
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ now := time.Now()
+ manager := r.FormValue("client")
+ crash := &Crash{
+ Manager: limitLength(manager, maxTextLen),
+ Tag: limitLength(req.Tag, maxTextLen),
+ //Title: limitLength(req.Desc, maxTitleLen),
+ Time: now,
+ }
+ var err error
+ if crash.Log, err = putText(c, "CrashLog", req.Log); err != nil {
+ return err
+ }
+ if crash.Report, err = putText(c, "CrashReport", req.Report); err != nil {
+ return err
+ }
+
+ group := &Group{Title: limitLength(req.Desc, maxTitleLen), Seq: 0}
+ for {
+ if err := ds.Get(c, group.Key(c), group); err != nil {
+ if err != ds.ErrNoSuchEntity {
+ return err
+ }
+ bug := &Bug{
+ Title: group.DisplayTitle(),
+ Status: BugStatusNew,
+ //Updated: now,
+ Groups: []string{group.hash()},
+ }
+ bugKey, err := ds.Put(c, ds.NewIncompleteKey(c, "Bug", nil), bug)
+ if err != nil {
+ return err
+ }
+ group.Bug = bugKey.IntID()
+ group.NumCrashes = 1
+ group.FirstTime = now
+ group.LastTime = now
+ group.Managers = []string{manager}
+ if _, err := ds.Put(c, group.Key(c), group); err != nil {
+ return err
+ }
+ break
+ }
+ bug := new(Bug)
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", group.Bug, nil), bug); err != nil {
+ return err
+ }
+ if bug.Status < BugStatusClosed {
+ group.NumCrashes++
+ group.LastTime = now
+ found := false
+ for _, manager1 := range group.Managers {
+ if manager1 == manager {
+ found = true
+ break
+ }
+ }
+ if !found {
+ group.Managers = append(group.Managers, manager)
+ }
+ if _, err := ds.Put(c, group.Key(c), group); err != nil {
+ return err
+ }
+ break
+ }
+ group.Seq++
+ }
+
+ if _, err := ds.Put(c, ds.NewIncompleteKey(c, "Crash", group.Key(c)), crash); err != nil {
+ return err
+ }
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func handleAddRepro(c appengine.Context, r *http.Request) (interface{}, error) {
+ req := new(dashboard.Repro)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal repro: %v", err)
+ }
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ now := time.Now()
+ group := &Group{Title: limitLength(req.Crash.Desc, maxTitleLen), Seq: 0}
+ for {
+ if err := ds.Get(c, group.Key(c), group); err != nil {
+ return err
+ }
+ bug := new(Bug)
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", group.Bug, nil), bug); err != nil {
+ return err
+ }
+ if bug.Status < BugStatusClosed {
+ break
+ }
+ group.Seq++
+ }
+ group.NumRepro++
+ group.LastTime = now
+ if len(req.Prog) != 0 {
+ group.HasRepro = true
+ }
+ if len(req.CProg) != 0 {
+ group.HasCRepro = true
+ }
+ if _, err := ds.Put(c, group.Key(c), group); err != nil {
+ return err
+ }
+ if !req.Reproduced {
+ return nil
+ }
+
+ manager := r.FormValue("client")
+ crash := &Crash{
+ Manager: limitLength(manager, maxTextLen),
+ Tag: limitLength(req.Crash.Tag, maxTextLen),
+ Time: now,
+ }
+ var err error
+ if crash.Log, err = putText(c, "CrashLog", req.Crash.Log); err != nil {
+ return err
+ }
+ if crash.Report, err = putText(c, "CrashReport", req.Crash.Report); err != nil {
+ return err
+ }
+ repro := &Repro{
+ Crash: *crash,
+ Opts: limitLength(req.Opts, maxOptsLen),
+ }
+ if repro.Prog, err = putText(c, "ReproProg", req.Prog); err != nil {
+ return err
+ }
+ if repro.CProg, err = putText(c, "ReproCProg", req.CProg); err != nil {
+ return err
+ }
+
+ if _, err := ds.Put(c, ds.NewIncompleteKey(c, "Repro", group.Key(c)), repro); err != nil {
+ return err
+ }
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func handlePollPatches(c appengine.Context, r *http.Request) (interface{}, error) {
+ var bugs []*Bug
+ if _, err := ds.NewQuery("Bug").Filter("Status <", BugStatusClosed).GetAll(c, &bugs); err != nil {
+ return nil, fmt.Errorf("failed to fetch bugs: %v", err)
+ }
+ var maxTime time.Time
+ for _, bug := range bugs {
+ for _, patch := range bug.Patches {
+ if maxTime.Before(patch.Time) {
+ maxTime = patch.Time
+ }
+ }
+ }
+ return fmt.Sprint(maxTime.UnixNano()), nil
+}
+
+func handleGetPatches(c appengine.Context, r *http.Request) (interface{}, error) {
+ var bugs []*Bug
+ if _, err := ds.NewQuery("Bug").Filter("Status <", BugStatusClosed).GetAll(c, &bugs); err != nil {
+ return nil, fmt.Errorf("failed to fetch bugs: %v", err)
+ }
+ var patches []dashboard.Patch
+ for _, bug := range bugs {
+ for _, patch := range bug.Patches {
+ diff, err := getText(c, patch.Diff)
+ if err != nil {
+ return nil, err
+ }
+ patches = append(patches, dashboard.Patch{
+ Title: patch.Title,
+ Diff: diff,
+ })
+ }
+ }
+ return patches, nil
+}
+
+type GetPatchesResponse struct {
+ Hash string
+ Patches []*Patch
+ Ignores []string
+}
+
+type Text struct {
+ Tag string // any informative tag
+ Text []byte // gzip-compressed text
+}
+
+func putText(c appengine.Context, tag string, data []byte) (int64, error) {
+ if len(data) == 0 {
+ return 0, nil
+ }
+ b := new(bytes.Buffer)
+ z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
+ z.Write(data)
+ z.Close()
+ text := &Text{
+ Tag: tag,
+ Text: b.Bytes(),
+ }
+ key, err := ds.Put(c, ds.NewIncompleteKey(c, "Text", nil), text)
+ if err != nil {
+ return 0, err
+ }
+ return key.IntID(), nil
+}
+
+func getText(c appengine.Context, id int64) ([]byte, error) {
+ text := new(Text)
+ if err := ds.Get(c, ds.NewKey(c, "Text", "", id, nil), text); err != nil {
+ return nil, err
+ }
+ d, err := gzip.NewReader(bytes.NewBuffer(text.Text))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read text: %v", err)
+ }
+ data, err := ioutil.ReadAll(d)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read text: %v", err)
+ }
+ return data, nil
+}
+
+// limitLength essentially does return s[:max],
+// but it ensures that we dot not split UTF-8 rune in half.
+// Otherwise appengine python scripts will break badly.
+func limitLength(s string, max int) string {
+ s = strings.TrimSpace(s)
+ if len(s) <= max {
+ return s
+ }
+ for {
+ s = s[:max]
+ r, size := utf8.DecodeLastRuneInString(s)
+ if r != utf8.RuneError || size != 1 {
+ return s
+ }
+ max--
+ }
+}
diff --git a/syz-dash/app.yaml b/syz-dash/app.yaml
new file mode 100644
index 000000000..72aa23688
--- /dev/null
+++ b/syz-dash/app.yaml
@@ -0,0 +1,16 @@
+application: syzkaller
+version: 1
+runtime: go
+api_version: go1
+
+handlers:
+- url: /static
+ static_dir: static
+ secure: always
+- url: /(|bug|text|client)
+ script: _go_app
+ login: admin
+ secure: always
+- url: /api
+ script: _go_app
+ secure: always
diff --git a/syz-dash/bug.html b/syz-dash/bug.html
new file mode 100644
index 000000000..ffaf29272
--- /dev/null
+++ b/syz-dash/bug.html
@@ -0,0 +1,134 @@
+<!doctype html>
+<html>
+<head>
+ <title>Syzkaller Dashboard</title>
+ <link rel="stylesheet" href="/static/style.css"/>
+</head>
+<body>
+ <form action="/bug?id={{.ID}}" method="post">
+ <table>
+ <tr>
+ <td>Title:</td>
+ <td><input name="title" type="text" size="200" maxlength="200" value="{{.Title}}" required/></td>
+ </tr>
+ <tr>
+ <td>Status:</td>
+ <td><select name="status">
+ <option value="new" {{if eq .Status "new"}}selected{{end}}>New</option>
+ <option value="reported" {{if eq .Status "reported"}}selected{{end}}>Reported</option>
+ <option value="fixed" {{if eq .Status "fixed"}}selected{{end}}>Fixed</option>
+ <option value="unclear" {{if eq .Status "unclear"}}selected{{end}}>Unclear</option>
+ </select></td>
+ </tr>
+ <tr>
+ <td>Crashes:</td>
+ <td>{{.NumCrashes}}</td>
+ </tr>
+ <tr>
+ <td>Time:</td>
+ <td>{{formatTime .LastTime}} - {{formatTime .FirstTime}}</td>
+ </tr>
+ <tr>
+ <td>Happens on:</td>
+ <td>{{.Managers}}</td>
+ </tr>
+ <tr>
+ <td>Report link:</td>
+ <td><input name="report_link" type="text" size="200" maxlength="1000" value="{{.ReportLink}}"/></td>
+ </tr>
+ <tr>
+ <td>CVE:</td>
+ <td><input name="cve" type="text" size="200" maxlength="100" value="{{.CVE}}"/></td>
+ </tr>
+ <tr>
+ <td>Comment:</td>
+ <td><input name="comment" type="text" size="200" maxlength="4000" value="{{.Comment}}"/></td>
+ </tr>
+ </table>
+ <input name="ver" type="hidden" value="{{.Version}}"/>
+ <input type="submit" name="action" value="Update"/>
+ <input type="submit" name="action" value="Close" title="fixed and is not happening anymore
+new crashes won't be associated with this bug
+and instead produce a new bug"/>
+ <input type="submit" name="action" value="Delete" title="trash/not interesting
+new crashes will produce a new bug"/>
+ <b>{{.Message}}</b>
+ </form>
+ <br>
+
+ <form action="/bug?id={{$.ID}}" method="post">
+ Merge into:
+ <select name="bug_id">
+ {{range $b := $.AllBugs}}
+ <option value="{{$b.ID}}">{{$b.Title}}</option>
+ {{end}}
+ </select>
+ <input type="submit" name="action" value="Merge"/>
+ </form>
+ <br>
+
+ {{if $.Groups}}
+ <table>
+ <caption>Crash groups:</caption>
+ {{range $g := $.Groups}}
+ <tr>
+ <td>{{$g.Title}}</td>
+ <td>
+ <form action="/bug?id={{$.ID}}" method="post">
+ <input name="hash" type="hidden" value="{{$g.Hash}}"/>
+ <input type="submit" name="action" value="Unmerge"/>
+ </form>
+ </td>
+ </tr>
+ {{end}}
+ </table>
+ <br>
+ {{end}}
+
+ {{if $.Patches}}
+ <table>
+ {{range $p := $.Patches}}
+ <tr>
+ <td>Patch:</td>
+ <td><a href="/text?id={{$p.Diff}}">{{$p.Title}}</a></td>
+ <td>
+ <form action="/bug?id={{$.ID}}" method="post">
+ <input name="title" type="hidden" value="{{$p.Title}}"/>
+ <input type="submit" name="action" value="Delete patch"/>
+ </form>
+ </td>
+ </tr>
+ {{end}}
+ </table>
+ <br>
+ {{end}}
+
+ <form action="/bug?id={{$.ID}}" method="post">
+ <p><textarea name="patch" cols="88" rows="10" required></textarea></p>
+ <input type="submit" name="action" value="Add patch"/>
+ </form>
+ <br>
+
+ <table>
+ <caption>Crashes:</caption>
+ <tr>
+ <th>Title</th>
+ <th>Manager</th>
+ <th>Time</th>
+ <th>Tag</th>
+ <th>Log</th>
+ <th>Report</th>
+ </tr>
+ {{range $c := $.Crashes}}
+ <tr>
+ <td>{{$c.Title}}</td>
+ <td>{{$c.Manager}}</td>
+ <td>{{formatTime $c.Time}}</td>
+ <td>{{$c.Tag}}</td>
+ <td>{{if $c.Log}}<a href="/text?id={{$c.Log}}">log</a>{{end}}</td>
+ <td>{{if $c.Report}}<a href="/text?id={{$c.Report}}">report</a>{{end}}</td>
+ </tr>
+ {{end}}
+ </table>
+</body>
+</html>
diff --git a/syz-dash/dash.html b/syz-dash/dash.html
new file mode 100644
index 000000000..a7ca4159b
--- /dev/null
+++ b/syz-dash/dash.html
@@ -0,0 +1,34 @@
+{{define "bug_table"}}
+ <table>
+ <caption>{{.Name}}:</caption>
+ <tr>
+ <th>Title</th>
+ <th>Count</th>
+ <th>Time</th>
+ <th>Happens on</th>
+ </tr>
+ {{range $b := $.Bugs}}
+ <tr>
+ <td><a href="/bug?id={{$b.ID}}">{{$b.Title}}</a></td>
+ <td>{{$b.NumCrashes}}</td>
+ <td>{{formatTime $b.LastTime}} - {{formatTime $b.FirstTime}}</td>
+ <td>{{$b.Managers}}</td>
+ </tr>
+ {{end}}
+ </table>
+{{end}}
+
+<!doctype html>
+<html>
+<head>
+ <title>Syzkaller Dashboard</title>
+ <link rel="stylesheet" href="/static/style.css"/>
+</head>
+<body>
+ {{range $g := $.BugGroups}}
+ {{if $g.Bugs}}
+ {{template "bug_table" $g}} <br>
+ {{end}}
+ {{end}}
+</body>
+</html>
diff --git a/syz-dash/handler.go b/syz-dash/handler.go
new file mode 100644
index 000000000..e08a882f0
--- /dev/null
+++ b/syz-dash/handler.go
@@ -0,0 +1,542 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build appengine
+
+package dash
+
+import (
+ "fmt"
+ "html/template"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "appengine"
+ ds "appengine/datastore"
+)
+
+func init() {
+ http.Handle("/", handlerWrapper(handleDash))
+ http.Handle("/bug", handlerWrapper(handleBug))
+ http.Handle("/text", handlerWrapper(handleText))
+ http.Handle("/client", handlerWrapper(handleClient))
+}
+
+func handlerWrapper(fn func(c appengine.Context, w http.ResponseWriter, r *http.Request) error) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ if err := fn(c, w, r); err != nil {
+ c.Errorf("Error: %v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ })
+}
+
+func handleClient(c appengine.Context, w http.ResponseWriter, r *http.Request) error {
+ name := r.FormValue("name")
+ if name == "" {
+ var clients []*Client
+ if _, err := ds.NewQuery("Client").GetAll(c, &clients); err != nil {
+ return fmt.Errorf("failed to fetch clients: %v", err)
+ }
+ for _, client := range clients {
+ fmt.Fprintf(w, "%v: %v<br>\n", client.Name, client.Key)
+ }
+ return nil
+ }
+ if !regexp.MustCompile("^[a-zA-Z0-9-_]{2,100}$").MatchString(name) {
+ return fmt.Errorf("bad name")
+ }
+ key := r.FormValue("key")
+ if !regexp.MustCompile("^[a-zA-Z0-9]{16,128}$").MatchString(key) {
+ return fmt.Errorf("bad key")
+ }
+ client := &Client{
+ Name: name,
+ Key: key,
+ }
+ if err := ds.Get(c, ds.NewKey(c, "Client", name, 0, nil), client); err == nil {
+ return fmt.Errorf("client already exists")
+ }
+ if _, err := ds.Put(c, ds.NewKey(c, "Client", name, 0, nil), client); err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "added client")
+ return nil
+}
+
+func handleDash(c appengine.Context, w http.ResponseWriter, r *http.Request) error {
+ data := &dataDash{}
+ bugGroups := map[int]*uiBugGroup{
+ BugStatusNew: &uiBugGroup{Name: "New bugs"},
+ BugStatusReported: &uiBugGroup{Name: "Reported bugs"},
+ BugStatusUnclear: &uiBugGroup{Name: "Unclear bugs"},
+ BugStatusFixed: &uiBugGroup{Name: "Fixed bugs"},
+ }
+ data.BugGroups = append(data.BugGroups, bugGroups[BugStatusNew], bugGroups[BugStatusReported], bugGroups[BugStatusUnclear], bugGroups[BugStatusFixed])
+
+ var bugs []*Bug
+ var keys []*ds.Key
+ var err error
+ if keys, err = ds.NewQuery("Bug").Filter("Status <", BugStatusClosed).GetAll(c, &bugs); err != nil {
+ return fmt.Errorf("failed to fetch bugs: %v", err)
+ }
+ bugMap := make(map[int64]*uiBug)
+ managers := make(map[int64]map[string]bool)
+ for i, bug := range bugs {
+ id := keys[i].IntID()
+ ui := &uiBug{
+ ID: id,
+ Title: bug.Title,
+ Status: statusToString(bug.Status),
+ Comment: bug.Comment,
+ }
+ bugMap[id] = ui
+ managers[id] = make(map[string]bool)
+ bugGroups[bug.Status].Bugs = append(bugGroups[bug.Status].Bugs, ui)
+ }
+
+ var groups []*Group
+ if _, err := ds.NewQuery("Group").GetAll(c, &groups); err != nil {
+ return fmt.Errorf("failed to fetch crash groups: %v", err)
+ }
+ for _, group := range groups {
+ ui := bugMap[group.Bug]
+ if ui == nil {
+ return fmt.Errorf("failed to find bug for crash %v (%v)", group.Title, group.Seq)
+ }
+ ui.NumCrashes += group.NumCrashes
+ if ui.FirstTime.IsZero() || ui.FirstTime.After(group.FirstTime) {
+ ui.FirstTime = group.FirstTime
+ }
+ if ui.LastTime.IsZero() || ui.LastTime.Before(group.LastTime) {
+ ui.LastTime = group.LastTime
+ }
+ for _, mgr := range group.Managers {
+ managers[group.Bug][mgr] = true
+ }
+ }
+
+ for id, mgrs := range managers {
+ bug := bugMap[id]
+ var arr []string
+ for k := range mgrs {
+ arr = append(arr, k)
+ }
+ sort.Strings(arr)
+ bug.Managers = strings.Join(arr, ", ")
+ }
+
+ for _, group := range data.BugGroups {
+ sort.Sort(uiBugArray(group.Bugs))
+ }
+ return templateDash.Execute(w, data)
+}
+
+func handleBug(c appengine.Context, w http.ResponseWriter, r *http.Request) error {
+ id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse bug id: %v", err)
+ }
+
+ bug := new(Bug)
+ switch r.FormValue("action") {
+ case "Update":
+ ver, err := strconv.ParseInt(r.FormValue("ver"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse bug version: %v", err)
+ }
+ title := limitLength(r.FormValue("title"), maxTitleLen)
+ reportLink := limitLength(r.FormValue("report_link"), maxLinkLen)
+ cve := limitLength(r.FormValue("cve"), maxTextLen)
+ comment := limitLength(r.FormValue("comment"), maxCommentLen)
+ status, err := stringToStatus(r.FormValue("status"))
+ if err != nil {
+ return err
+ }
+ if title == "" {
+ return fmt.Errorf("title can't be empty")
+ }
+ switch status {
+ case BugStatusReported:
+ case BugStatusFixed:
+ case BugStatusUnclear:
+ if comment == "" {
+ return fmt.Errorf("enter comment as to why it's unclear")
+ }
+ }
+
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ if bug.Version != ver {
+ return fmt.Errorf("bug has changed by somebody else")
+ }
+ bug.Title = title
+ bug.Status = status
+ bug.ReportLink = reportLink
+ bug.CVE = cve
+ bug.Comment = comment
+ bug.Version++
+ if _, err := ds.Put(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ return nil
+ }, nil); err != nil {
+ return err
+ }
+ case "Merge":
+ otherID, err := strconv.ParseInt(r.FormValue("bug_id"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse bug id: %v", err)
+ }
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ srcBug := new(Bug)
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), srcBug); err != nil {
+ return err
+ }
+ dstBug := new(Bug)
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", otherID, nil), dstBug); err != nil {
+ return err
+ }
+ var groupKeys []*ds.Key
+ var groups []*Group
+ for _, hash := range srcBug.Groups {
+ groupKeys = append(groupKeys, ds.NewKey(c, "Group", hash, 0, nil))
+ groups = append(groups, new(Group))
+ }
+ if err := ds.GetMulti(c, groupKeys, groups); err != nil {
+ return fmt.Errorf("failed to fetch crash groups: %v", err)
+ }
+ for _, group := range groups {
+ group.Bug = otherID
+ if _, err := ds.Put(c, group.Key(c), group); err != nil {
+ return err
+ }
+ }
+ dstBug.Groups = append(dstBug.Groups, srcBug.Groups...)
+ if _, err := ds.Put(c, ds.NewKey(c, "Bug", "", otherID, nil), dstBug); err != nil {
+ return err
+ }
+ if err := ds.Delete(c, ds.NewKey(c, "Bug", "", id, nil)); err != nil {
+ return err
+ }
+ id = otherID
+ bug = dstBug
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return err
+ }
+ http.Redirect(w, r, fmt.Sprintf("bug?id=%v", otherID), http.StatusMovedPermanently)
+ return nil
+ case "Unmerge":
+ hash := r.FormValue("hash")
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ group := new(Group)
+ if err := ds.Get(c, ds.NewKey(c, "Group", hash, 0, nil), group); err != nil {
+ return err
+ }
+ found := false
+ for i, hash1 := range bug.Groups {
+ if hash == hash1 {
+ found = true
+ copy(bug.Groups[i:], bug.Groups[i+1:])
+ bug.Groups = bug.Groups[:len(bug.Groups)-1]
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("group is not found")
+ }
+ if _, err := ds.Put(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+
+ newBug := &Bug{
+ Title: group.DisplayTitle(),
+ Status: BugStatusNew,
+ //Updated: now,
+ Groups: []string{group.hash()},
+ }
+ bugKey, err := ds.Put(c, ds.NewIncompleteKey(c, "Bug", nil), newBug)
+ if err != nil {
+ return err
+ }
+ group.Bug = bugKey.IntID()
+ if _, err := ds.Put(c, group.Key(c), group); err != nil {
+ return err
+ }
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return err
+ }
+ case "Add patch":
+ title, diff, err := parsePatch(r.FormValue("patch"))
+ if err != nil {
+ return fmt.Errorf("failed to parse patch: %v", err)
+ }
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ for _, patch := range bug.Patches {
+ if patch.Title == title {
+ return fmt.Errorf("patch is already attached: %v", title)
+ }
+ }
+ diffID, err := putText(c, "PatchDiff", []byte(diff))
+ if err != nil {
+ return err
+ }
+ bug.Patches = append(bug.Patches, Patch{
+ Title: title,
+ Diff: diffID,
+ Time: time.Now(),
+ })
+ if _, err := ds.Put(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return fmt.Errorf("failed to save bug: %v", err)
+ }
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return err
+ }
+ case "Delete patch":
+ title := r.FormValue("title")
+ if err := ds.RunInTransaction(c, func(c appengine.Context) error {
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ found := false
+ for i, patch := range bug.Patches {
+ if patch.Title == title {
+ found = true
+ copy(bug.Patches[i:], bug.Patches[i+1:])
+ bug.Patches = bug.Patches[:len(bug.Patches)-1]
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("no such patch")
+ }
+ if _, err := ds.Put(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return fmt.Errorf("failed to save bug: %v", err)
+ }
+ return nil
+ }, &ds.TransactionOptions{XG: true}); err != nil {
+ return err
+ }
+ case "":
+ if err := ds.Get(c, ds.NewKey(c, "Bug", "", id, nil), bug); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unknown action '%v'", r.FormValue("action"))
+ }
+
+ data := &dataBug{}
+ data.ID = id
+ data.Version = bug.Version
+ data.Title = bug.Title
+ data.ReportLink = bug.ReportLink
+ data.CVE = bug.CVE
+ data.Comment = bug.Comment
+ data.Status = statusToString(bug.Status)
+ data.Patches = bug.Patches
+ //data.Updated = bug.Updated
+
+ var bugs []*Bug
+ var keys []*ds.Key
+ if keys, err = ds.NewQuery("Bug").Filter("Status <", BugStatusClosed).GetAll(c, &bugs); err != nil {
+ return fmt.Errorf("failed to fetch bugs: %v", err)
+ }
+ for i, bug1 := range bugs {
+ id1 := keys[i].IntID()
+ if id1 == id {
+ continue
+ }
+ data.AllBugs = append(data.AllBugs, &uiBug{
+ ID: id1,
+ Title: fmt.Sprintf("%v (%v)", bug1.Title, statusToString(bug1.Status)),
+ })
+ }
+ sort.Sort(uiBugTitleSorter(data.AllBugs))
+
+ managers := make(map[string]bool)
+
+ var groups []*Group
+ if _, err := ds.NewQuery("Group").Filter("Bug=", id).GetAll(c, &groups); err != nil {
+ return fmt.Errorf("failed to fetch crash groups: %v", err)
+ }
+ for _, group := range groups {
+ data.NumCrashes += group.NumCrashes
+ if data.FirstTime.IsZero() || data.FirstTime.After(group.FirstTime) {
+ data.FirstTime = group.FirstTime
+ }
+ if data.LastTime.IsZero() || data.LastTime.Before(group.LastTime) {
+ data.LastTime = group.LastTime
+ }
+ for _, mgr := range group.Managers {
+ managers[mgr] = true
+ }
+ data.Groups = append(data.Groups, &uiGroup{group.DisplayTitle(), group.hash()})
+
+ var crashes []*Crash
+ if _, err := ds.NewQuery("Crash").Ancestor(group.Key(c)).GetAll(c, &crashes); err != nil {
+ return fmt.Errorf("failed to fetch crashes: %v", err)
+ }
+ for _, crash := range crashes {
+ data.Crashes = append(data.Crashes, &uiCrash{
+ Title: group.DisplayTitle(),
+ Manager: crash.Manager,
+ Tag: crash.Tag,
+ Time: crash.Time,
+ Log: crash.Log,
+ Report: crash.Report,
+ })
+ }
+ }
+
+ sort.Sort(uiCrashArray(data.Crashes))
+
+ if len(data.Groups) == 1 {
+ data.Groups = nil
+ }
+
+ var arr []string
+ for k := range managers {
+ arr = append(arr, k)
+ }
+ sort.Strings(arr)
+ data.Managers = strings.Join(arr, ", ")
+
+ return templateBug.Execute(w, data)
+}
+
+func handleText(c appengine.Context, w http.ResponseWriter, r *http.Request) error {
+ id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse text id: %v", err)
+ }
+ data, err := getText(c, id)
+ if err != nil {
+ return err
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Write(data)
+ return nil
+}
+
+type dataDash struct {
+ BugGroups []*uiBugGroup
+}
+
+type dataBug struct {
+ uiBug
+ Crashes []*uiCrash
+ Message string
+ AllBugs []*uiBug
+}
+
+type uiBugGroup struct {
+ Name string
+ Bugs []*uiBug
+}
+
+type uiGroup struct {
+ Title string
+ Hash string
+}
+
+type uiBug struct {
+ ID int64
+ Version int64
+ Title string
+ Status string
+ NumCrashes int64
+ FirstTime time.Time
+ LastTime time.Time
+ Updated time.Time
+ Managers string
+ ReportLink string
+ Comment string
+ CVE string
+ Groups []*uiGroup
+ Patches []Patch
+}
+
+type uiCrash struct {
+ Title string
+ Manager string
+ Tag string
+ Time time.Time
+ Log int64
+ Report int64
+}
+
+type uiBugArray []*uiBug
+
+func (a uiBugArray) Len() int {
+ return len(a)
+}
+
+func (a uiBugArray) Less(i, j int) bool {
+ return a[i].LastTime.After(a[j].LastTime)
+}
+
+func (a uiBugArray) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
+
+type uiBugTitleSorter []*uiBug
+
+func (a uiBugTitleSorter) Len() int {
+ return len(a)
+}
+
+func (a uiBugTitleSorter) Less(i, j int) bool {
+ return a[i].Title < a[j].Title
+}
+
+func (a uiBugTitleSorter) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
+
+type uiCrashArray []*uiCrash
+
+func (a uiCrashArray) Len() int {
+ return len(a)
+}
+
+func (a uiCrashArray) Less(i, j int) bool {
+ return a[i].Time.Before(a[j].Time)
+}
+
+func (a uiCrashArray) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
+
+type dataPatches struct {
+ Message string
+ Patches []*Patch
+}
+
+var tmplFuncs = template.FuncMap{
+ "formatTime": formatTime,
+}
+
+func formatTime(t time.Time) string {
+ return t.Format("Jan 02 15:04")
+}
+
+var (
+ templateDash = template.Must(template.New("dash.html").Funcs(tmplFuncs).ParseFiles("dash.html"))
+ templateBug = template.Must(template.New("bug.html").Funcs(tmplFuncs).ParseFiles("bug.html"))
+)
diff --git a/syz-dash/patch.go b/syz-dash/patch.go
new file mode 100644
index 000000000..d3e6b4773
--- /dev/null
+++ b/syz-dash/patch.go
@@ -0,0 +1,70 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "bufio"
+ "fmt"
+ "strings"
+)
+
+func parsePatch(text string) (title string, diff string, err error) {
+ s := bufio.NewScanner(strings.NewReader(text))
+ parsingDiff := false
+ diffStarted := false
+ lastLine := ""
+ for s.Scan() {
+ ln := s.Text()
+ if strings.HasPrefix(ln, "--- a/") {
+ parsingDiff = true
+ if title == "" {
+ title = lastLine
+ }
+ }
+ if parsingDiff {
+ if ln == "--" || ln == "-- " {
+ break
+ }
+ diff += ln + "\n"
+ continue
+ }
+ if strings.HasPrefix(ln, "diff --git") {
+ diffStarted = true
+ continue
+ }
+ if strings.HasPrefix(ln, "Subject: ") {
+ ln = ln[len("Subject: "):]
+ if strings.Contains(strings.ToLower(ln), "[patch") {
+ pos := strings.IndexByte(ln, ']')
+ if pos == -1 {
+ err = fmt.Errorf("subject line does not contain ']'")
+ return
+ }
+ ln = ln[pos+1:]
+ }
+ title = ln
+ continue
+ }
+ if ln == "" || title != "" || diffStarted {
+ continue
+ }
+ lastLine = ln
+ if strings.HasPrefix(ln, " ") {
+ title = ln[4:]
+ }
+ }
+ if err = s.Err(); err != nil {
+ return
+ }
+ title = strings.TrimSpace(title)
+ if title == "" {
+ err = fmt.Errorf("failed to extract title")
+ return
+ }
+ if diff == "" {
+ err = fmt.Errorf("failed to extract diff")
+ return
+ }
+ return
+}
diff --git a/syz-dash/patch_test.go b/syz-dash/patch_test.go
new file mode 100644
index 000000000..5d45a3187
--- /dev/null
+++ b/syz-dash/patch_test.go
@@ -0,0 +1,288 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "testing"
+)
+
+// TestParsePatch runs parsePatch over every fixture in the tests table and
+// verifies both the extracted title and the extracted diff, using the title
+// as the subtest name.
+func TestParsePatch(t *testing.T) {
+ for _, test := range tests {
+ t.Run(test.title, func(t *testing.T) {
+ title, diff, err := parsePatch(test.text)
+ if err != nil {
+ t.Fatalf("failed to parse patch: %v", err)
+ }
+ if test.title != title {
+ t.Fatalf("title mismatch, want:\n%v\ngot:\n%v", test.title, title)
+ }
+ if test.diff != diff {
+ t.Fatalf("diff mismatch, want:\n%v\ngot:\n%v", test.diff, diff)
+ }
+ })
+ }
+}
+
+// tests lists patch-parsing fixtures: raw input text paired with the
+// expected extracted title and diff. Each case exercises a different
+// patch delivery format.
+var tests = []struct {
+ text string
+ title string
+ diff string
+}{
+ // Raw git-log commit: title comes from the indented message body.
+ {
+ text: `
+commit 7bdb59aaaaaa4bd7161adc8f923cdef10f2638d1
+Author: Some foo-bar áš <foo@bar.com>
+Date: Tue Feb 7 17:44:54 2017 +0100
+
+ net/tcp: fix foo()
+
+ foo->bar is wrong.
+ Fix foo().
+
+ More description.
+
+ Signed-off-by: Some foo-bar áš <foo@bar.com>
+ Reviewed: Some foo-bar <foo@bar.com>
+ Link: http://lkml.kernel.org/r/123123123123-123-1-git-send-email-foo@bar.com
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 74e0388cc88d..fc6f740d0277 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+ */
+ if (delta == 0) {
+ tick_nohz_restart(ts, now);
++ /*
++ * Make sure next tick stop doesn't get fooled by past
++ * clock deadline
++ */
++ ts->next_tick = 0;
+ goto out;
+ }
+ }
+`,
+ title: "net/tcp: fix foo()",
+ diff: `--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+ */
+ if (delta == 0) {
+ tick_nohz_restart(ts, now);
++ /*
++ * Make sure next tick stop doesn't get fooled by past
++ * clock deadline
++ */
++ ts->next_tick = 0;
+ goto out;
+ }
+ }
+`,
+ },
+
+ // Bare title line followed by a blank line and the diff.
+ {
+ text: `
+fix looking up invalid subclass: 4294967295
+
+diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
+index acbe61c..160dc89 100644
+--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ {
+ irda_queue_t* queue;
+`,
+ title: "fix looking up invalid subclass: 4294967295",
+ diff: `--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ {
+ irda_queue_t* queue;
+`,
+ },
+
+ // Title on the very first line, diff immediately after (no blank line),
+ // and input without a trailing newline.
+ {
+ text: `net: fix looking up invalid subclass: 4294967295
+diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
+index acbe61c..160dc89 100644
+--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)`,
+ title: "net: fix looking up invalid subclass: 4294967295",
+ diff: `--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+`,
+ },
+
+ // Full email: title from the Subject header with the "[PATCH v2]" tag
+ // stripped; the "---" diffstat separator must not be mistaken for a diff.
+ {
+ text: `
+Delivered-To: foo@bar.com
+Date: Tue, 31 Jan 2017 15:24:03 +0100 (CET)
+To: Foo Bar <foo@bar.com>
+Subject: [PATCH v2] timerfd: Protect the might cancel mechanism proper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=US-ASCII
+
+The handling of the might_cancel queueing is not properly protected, so
+parallel operations on the file descriptor can race with each other and
+lead to list corruptions or use after free.
+
+Protect the context for these operations with a seperate lock.
+
+Reported-by: Foo Bar <foo@bar.com>
+Signed-off-by: Foo Bar <foo@bar.com>
+---
+ fs/timerfd.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ title: "timerfd: Protect the might cancel mechanism proper",
+ diff: `--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ },
+
+ // Diff terminated by a "--" signature separator and a git version line,
+ // which must be excluded from the extracted diff.
+ {
+ text: `crypto/sha512-mb: Correct initialization value for lane lens
+diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+index 36870b2..5484d77 100644
+--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+@@ -57,10 +57,10 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
+ {
+ unsigned int j;
+
+- state->lens[0] = 0;
+- state->lens[1] = 1;
+- state->lens[2] = 2;
+- state->lens[3] = 3;
++ state->lens[0] = 0xFFFFFFFF00000000;
++ state->lens[1] = 0xFFFFFFFF00000001;
++ state->lens[2] = 0xFFFFFFFF00000002;
++ state->lens[3] = 0xFFFFFFFF00000003;
+ state->unused_lanes = 0xFF03020100;
+ for (j = 0; j < 4; j++)
+ state->ldata[j].job_in_lane = NULL;
+--
+2.5.5`,
+ title: "crypto/sha512-mb: Correct initialization value for lane lens",
+ diff: `--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+@@ -57,10 +57,10 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
+ {
+ unsigned int j;
+
+- state->lens[0] = 0;
+- state->lens[1] = 1;
+- state->lens[2] = 2;
+- state->lens[3] = 3;
++ state->lens[0] = 0xFFFFFFFF00000000;
++ state->lens[1] = 0xFFFFFFFF00000001;
++ state->lens[2] = 0xFFFFFFFF00000002;
++ state->lens[3] = 0xFFFFFFFF00000003;
+ state->unused_lanes = 0xFF03020100;
+ for (j = 0; j < 4; j++)
+ state->ldata[j].job_in_lane = NULL;
+`,
+ },
+
+ // Subject tag in mixed case ("[Patch net]") is still stripped.
+ {
+ text: `
+Subject: [Patch net] kcm: fix a null pointer dereference in kcm_sendmsg()
+
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ title: "kcm: fix a null pointer dereference in kcm_sendmsg()",
+ diff: `--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ },
+
+ // "Re: [PATCH v3]" reply prefix: everything through ']' is dropped.
+ {
+ text: `
+Subject: Re: [PATCH v3] net/irda: fix lockdep annotation
+
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ title: "net/irda: fix lockdep annotation",
+ diff: `--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+`,
+ },
+}
diff --git a/syz-dash/static/style.css b/syz-dash/static/style.css
new file mode 100644
index 000000000..edd3be3a8
--- /dev/null
+++ b/syz-dash/static/style.css
@@ -0,0 +1,15 @@
+/* Shared styling for all dashboard tables: collapsed 1px borders,
+   bold captions, and padded cells. */
+table {
+ border-collapse:collapse;
+ border:1px solid;
+}
+table caption {
+ font-weight: bold;
+}
+table td {
+ border:1px solid;
+ padding: 3px;
+}
+table th {
+ border:1px solid;
+ padding: 3px;
+}
diff --git a/syz-gce/generated.go b/syz-gce/generated.go
index 364583808..ce52adeed 100644
--- a/syz-gce/generated.go
+++ b/syz-gce/generated.go
@@ -365,4 +365,3 @@ echo -n "$4" > tag
tar -czvf image.tar.gz disk.tar.gz key tag obj/vmlinux
rm -rf tag obj
`
-
diff --git a/syz-gce/syz-gce.go b/syz-gce/syz-gce.go
index 7d0f85f3f..821cda28a 100644
--- a/syz-gce/syz-gce.go
+++ b/syz-gce/syz-gce.go
@@ -18,6 +18,7 @@ package main
import (
"archive/tar"
+ "bytes"
"compress/gzip"
"encoding/json"
"flag"
@@ -38,6 +39,7 @@ import (
"cloud.google.com/go/storage"
"github.com/google/syzkaller/config"
+ "github.com/google/syzkaller/dashboard"
"github.com/google/syzkaller/gce"
. "github.com/google/syzkaller/log"
"golang.org/x/net/context"
@@ -51,6 +53,8 @@ var (
storageClient *storage.Client
GCE *gce.Context
managerHttpPort uint32
+ patchesHash string
+ patches []dashboard.Patch
)
type Config struct {
@@ -69,6 +73,15 @@ type Config struct {
Linux_Branch string
Linux_Compiler string
Linux_Userspace string
+
+ Dashboard_Addr string
+ Dashboard_Key string
+}
+
+type Action interface {
+ Name() string
+ Poll() (string, error)
+ Build() error
}
func main() {
@@ -99,15 +112,45 @@ func main() {
sigC := make(chan os.Signal, 2)
signal.Notify(sigC, syscall.SIGINT, syscall.SIGUSR1)
+ var actions []Action
+ actions = append(actions, new(SyzkallerAction))
+ if cfg.Image_Archive == "local" {
+ if syscall.Getuid() != 0 {
+ Fatalf("building local image requires root")
+ }
+ if cfg.Dashboard_Addr != "" {
+ actions = append(actions, &DashboardAction{
+ Dash: &dashboard.Dashboard{
+ Addr: cfg.Dashboard_Addr,
+ Client: cfg.Name,
+ Key: cfg.Dashboard_Key,
+ },
+ })
+ }
+ actions = append(actions, &LocalBuildAction{
+ Dir: abs(wd, "build"),
+ Repo: cfg.Linux_Git,
+ Branch: cfg.Linux_Branch,
+ Compiler: cfg.Linux_Compiler,
+ UserspaceDir: abs(wd, cfg.Linux_Userspace),
+ ImagePath: cfg.Image_Path,
+ ImageName: cfg.Image_Name,
+ })
+ } else {
+ actions = append(actions, &GCSImageAction{
+ ImageArchive: cfg.Image_Archive,
+ ImagePath: cfg.Image_Path,
+ ImageName: cfg.Image_Name,
+ })
+ }
+ currHashes := make(map[string]string)
+ nextHashes := make(map[string]string)
+
var managerCmd *exec.Cmd
managerStopped := make(chan error)
stoppingManager := false
- var lastImageUpdated time.Time
- lastSyzkallerHash := ""
- lastLinuxHash := ""
- buildDir := abs(wd, "build")
- linuxDir := filepath.Join(buildDir, "linux")
var delayDuration time.Duration
+loop:
for {
if delayDuration != 0 {
Logf(0, "sleep for %v", delayDuration)
@@ -146,45 +189,27 @@ func main() {
}
}
}
- delayDuration = 10 * time.Minute // assume that an error happened
+ delayDuration = 15 * time.Minute // assume that an error happened
- // Poll syzkaller repo.
- syzkallerHash, err := updateSyzkallerBuild()
- if err != nil {
- Logf(0, "failed to update syzkaller: %v", err)
- continue
- }
-
- // Poll kernel git repo or GCS image.
- var imageArchive *storage.ObjectHandle
- var imageUpdated time.Time
- linuxHash := ""
- if cfg.Image_Archive == "local" {
- if syscall.Getuid() != 0 {
- Fatalf("building local image requires root")
- }
- var err error
- linuxHash, err = gitUpdate(linuxDir, cfg.Linux_Git, cfg.Linux_Branch)
+ Logf(0, "polling...")
+ for _, a := range actions {
+ hash, err := a.Poll()
if err != nil {
- Logf(0, "%v", err)
- delayDuration = time.Hour // cloning linux is expensive
- continue
+ Logf(0, "failed to poll %v: %v", a.Name(), err)
+ continue loop
}
- Logf(0, "kernel hash %v, syzkaller hash %v", linuxHash, syzkallerHash)
- } else {
- var err error
- imageArchive, imageUpdated, err = openFile(cfg.Image_Archive)
- if err != nil {
- Logf(0, "%v", err)
- continue
+ nextHashes[a.Name()] = hash
+ }
+ changed := managerCmd == nil
+ for _, a := range actions {
+ next := nextHashes[a.Name()]
+ curr := currHashes[a.Name()]
+ if curr != next {
+ Logf(0, "%v changed %v -> %v", a, curr, next)
+ changed = true
}
- Logf(0, "image update time %v, syzkaller hash %v", imageUpdated, syzkallerHash)
}
-
- if lastImageUpdated == imageUpdated &&
- lastLinuxHash == linuxHash &&
- lastSyzkallerHash == syzkallerHash &&
- managerCmd != nil {
+ if !changed {
// Nothing has changed, sleep for another hour.
delayDuration = time.Hour
continue
@@ -204,99 +229,17 @@ func main() {
continue
}
- // Download and extract image from GCS.
- if lastImageUpdated != imageUpdated {
- Logf(0, "downloading image archive...")
- if err := os.RemoveAll("image"); err != nil {
- Logf(0, "failed to remove image dir: %v", err)
- continue
- }
- if err := downloadAndExtract(imageArchive, "image"); err != nil {
- Logf(0, "failed to download and extract %v: %v", cfg.Image_Archive, err)
- continue
- }
-
- Logf(0, "uploading image...")
- if err := uploadFile("image/disk.tar.gz", cfg.Image_Path); err != nil {
- Logf(0, "failed to upload image: %v", err)
- continue
- }
-
- Logf(0, "creating gce image...")
- if err := GCE.DeleteImage(cfg.Image_Name); err != nil {
- Logf(0, "failed to delete GCE image: %v", err)
- continue
- }
- if err := GCE.CreateImage(cfg.Image_Name, cfg.Image_Path); err != nil {
- Logf(0, "failed to create GCE image: %v", err)
- continue
- }
- }
- lastImageUpdated = imageUpdated
-
- // Rebuild kernel.
- if lastLinuxHash != linuxHash {
- Logf(0, "building linux kernel...")
- if err := buildKernel(linuxDir, cfg.Linux_Compiler); err != nil {
- Logf(0, "build failed: %v", err)
- continue
- }
-
- scriptFile := filepath.Join(buildDir, "create-gce-image.sh")
- if err := ioutil.WriteFile(scriptFile, []byte(createImageScript), 0700); err != nil {
- Logf(0, "failed to write script file: %v", err)
- continue
- }
-
- Logf(0, "building image...")
- vmlinux := filepath.Join(linuxDir, "vmlinux")
- bzImage := filepath.Join(linuxDir, "arch/x86/boot/bzImage")
- if _, err := runCmd(buildDir, scriptFile, abs(wd, cfg.Linux_Userspace), bzImage, vmlinux, linuxHash); err != nil {
- Logf(0, "image build failed: %v", err)
- continue
- }
- os.Remove(filepath.Join(buildDir, "disk.raw"))
- os.Remove(filepath.Join(buildDir, "image.tar.gz"))
- os.MkdirAll("image/obj", 0700)
- if err := ioutil.WriteFile("image/tag", []byte(linuxHash), 0600); err != nil {
- Logf(0, "failed to write tag file: %v", err)
- continue
- }
- if err := os.Rename(filepath.Join(buildDir, "key"), "image/key"); err != nil {
- Logf(0, "failed to rename key file: %v", err)
- continue
- }
- if err := os.Rename(vmlinux, "image/obj/vmlinux"); err != nil {
- Logf(0, "failed to rename vmlinux file: %v", err)
- continue
- }
- Logf(0, "uploading image...")
- if err := uploadFile(filepath.Join(buildDir, "disk.tar.gz"), cfg.Image_Path); err != nil {
- Logf(0, "failed to upload image: %v", err)
- continue
- }
-
- Logf(0, "creating gce image...")
- if err := GCE.DeleteImage(cfg.Image_Name); err != nil {
- Logf(0, "failed to delete GCE image: %v", err)
- continue
- }
- if err := GCE.CreateImage(cfg.Image_Name, cfg.Image_Path); err != nil {
- Logf(0, "failed to create GCE image: %v", err)
+ for _, a := range actions {
+ if currHashes[a.Name()] == nextHashes[a.Name()] {
continue
}
- }
- lastLinuxHash = linuxHash
-
- // Rebuild syzkaller.
- if lastSyzkallerHash != syzkallerHash {
- Logf(0, "building syzkaller...")
- if _, err := runCmd("gopath/src/github.com/google/syzkaller", "make"); err != nil {
- Logf(0, "failed to update/build syzkaller: %v", err)
- continue
+ Logf(0, "building %v...", a.Name())
+ if err := a.Build(); err != nil {
+ Logf(0, "building %v failed: %v", a.Name(), err)
+ continue loop
}
+ currHashes[a.Name()] = nextHashes[a.Name()]
}
- lastSyzkallerHash = syzkallerHash
// Restart syz-manager.
port, err := chooseUnusedPort()
@@ -304,7 +247,7 @@ func main() {
Logf(0, "failed to choose an unused port: %v", err)
continue
}
- if err := writeManagerConfig(port, "manager.cfg"); err != nil {
+ if err := writeManagerConfig(cfg, port, "manager.cfg"); err != nil {
Logf(0, "failed to write manager config: %v", err)
continue
}
@@ -325,6 +268,211 @@ func main() {
}
}
+// SyzkallerAction tracks and rebuilds the syzkaller binaries themselves.
+// It is stateless; the checked-out source under gopath acts as the state.
+type SyzkallerAction struct {
+}
+
+// Name identifies this action in logs and in the hash maps of the main loop.
+func (a *SyzkallerAction) Name() string {
+ return "syzkaller"
+}
+
+// Poll executes 'git pull' on syzkaller and all dependent packages.
+// Returns syzkaller HEAD hash.
+func (a *SyzkallerAction) Poll() (string, error) {
+ if _, err := runCmd("", "go", "get", "-u", "-d", "github.com/google/syzkaller/syz-manager"); err != nil {
+ return "", err
+ }
+ return gitRevision("gopath/src/github.com/google/syzkaller")
+}
+
+// Build runs 'make' in the syzkaller checkout to rebuild the binaries.
+func (a *SyzkallerAction) Build() error {
+ if _, err := runCmd("gopath/src/github.com/google/syzkaller", "make"); err != nil {
+ return err
+ }
+ return nil
+}
+
+// DashboardAction polls the dashboard for the set of patches that should be
+// applied to the kernel. Poll/Build store their results in the package-level
+// patchesHash and patches variables, which LocalBuildAction reads.
+type DashboardAction struct {
+ Dash *dashboard.Dashboard
+}
+
+// Name identifies this action in logs and in the hash maps of the main loop.
+func (a *DashboardAction) Name() string {
+ return "dashboard"
+}
+
+// Poll fetches the current patch-set hash from the dashboard and caches it
+// in the global patchesHash.
+func (a *DashboardAction) Poll() (hash string, err error) {
+ patchesHash, err = a.Dash.PollPatches()
+ return patchesHash, err
+}
+
+// Build downloads the actual patches and caches them in the global patches
+// slice for the subsequent kernel build.
+func (a *DashboardAction) Build() (err error) {
+ patches, err = a.Dash.GetPatches()
+ return
+}
+
+// LocalBuildAction checks out, patches and builds a Linux kernel locally,
+// then packages and uploads a bootable GCE image built from it.
+type LocalBuildAction struct {
+ Dir string // build root; the kernel checkout lives in Dir/linux
+ Repo string // kernel git repo URL
+ Branch string // branch to check out; empty means leave HEAD as is
+ Compiler string // compiler passed to buildKernel
+ UserspaceDir string // userspace image directory fed to the image script
+ ImagePath string // GCS path for the uploaded image archive
+ ImageName string // name of the GCE image to (re)create
+}
+
+// Name identifies this action in logs and in the hash maps of the main loop.
+func (a *LocalBuildAction) Name() string {
+ return "kernel"
+}
+
+// Poll updates the local kernel checkout and returns its revision.
+// If a plain 'git pull' fails (e.g. corrupted or missing checkout), the repo
+// dir is wiped and re-cloned from scratch. The returned string also embeds
+// the current dashboard patchesHash, so a change in the patch set alone
+// triggers a rebuild.
+func (a *LocalBuildAction) Poll() (string, error) {
+ dir := filepath.Join(a.Dir, "linux")
+ // Best-effort: drop any leftover patch modifications before pulling.
+ runCmd(dir, "git", "reset", "--hard")
+ if _, err := runCmd(dir, "git", "pull"); err != nil {
+ if err := os.RemoveAll(dir); err != nil {
+ return "", fmt.Errorf("failed to remove repo dir: %v", err)
+ }
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return "", fmt.Errorf("failed to create repo dir: %v", err)
+ }
+ if _, err := runCmd("", "git", "clone", a.Repo, dir); err != nil {
+ return "", err
+ }
+ if _, err := runCmd(dir, "git", "pull"); err != nil {
+ return "", err
+ }
+ }
+ if a.Branch != "" {
+ if _, err := runCmd(dir, "git", "checkout", a.Branch); err != nil {
+ return "", err
+ }
+ }
+ rev, err := gitRevision(dir)
+ if err != nil {
+ return "", err
+ }
+ if patchesHash != "" {
+ rev += "/" + patchesHash
+ }
+ return rev, nil
+}
+
+// Build applies the cached dashboard patches to the checkout, compiles the
+// kernel, packages a bootable image via the create-gce-image.sh script, and
+// uploads/registers it as a GCE image. Artifacts (tag, ssh key, vmlinux) are
+// staged under the image/ directory for the manager config.
+func (a *LocalBuildAction) Build() error {
+ dir := filepath.Join(a.Dir, "linux")
+ hash, err := gitRevision(dir)
+ if err != nil {
+ return err
+ }
+ for _, p := range patches {
+ if err := a.apply(p); err != nil {
+ return err
+ }
+ }
+ Logf(0, "building kernel on %v...", hash)
+ if err := buildKernel(dir, a.Compiler); err != nil {
+ return fmt.Errorf("build failed: %v", err)
+ }
+ scriptFile := filepath.Join(a.Dir, "create-gce-image.sh")
+ if err := ioutil.WriteFile(scriptFile, []byte(createImageScript), 0700); err != nil {
+ return fmt.Errorf("failed to write script file: %v", err)
+ }
+ Logf(0, "building image...")
+ vmlinux := filepath.Join(dir, "vmlinux")
+ bzImage := filepath.Join(dir, "arch/x86/boot/bzImage")
+ if _, err := runCmd(a.Dir, scriptFile, a.UserspaceDir, bzImage, vmlinux, hash); err != nil {
+ return fmt.Errorf("image build failed: %v", err)
+ }
+ // Best-effort cleanup of intermediate script outputs.
+ os.Remove(filepath.Join(a.Dir, "disk.raw"))
+ os.Remove(filepath.Join(a.Dir, "image.tar.gz"))
+ os.MkdirAll("image/obj", 0700)
+ if err := ioutil.WriteFile("image/tag", []byte(hash), 0600); err != nil {
+ return fmt.Errorf("failed to write tag file: %v", err)
+ }
+ if err := os.Rename(filepath.Join(a.Dir, "key"), "image/key"); err != nil {
+ return fmt.Errorf("failed to rename key file: %v", err)
+ }
+ if err := os.Rename(vmlinux, "image/obj/vmlinux"); err != nil {
+ return fmt.Errorf("failed to rename vmlinux file: %v", err)
+ }
+ if err := createImage(filepath.Join(a.Dir, "disk.tar.gz"), a.ImagePath, a.ImageName); err != nil {
+ return err
+ }
+ return nil
+}
+
+// apply applies a single dashboard patch to the kernel checkout with
+// patch(1). An already-applied patch is detected and skipped; a patch that
+// fails its dry run is logged and skipped rather than failing the build.
+func (a *LocalBuildAction) apply(p dashboard.Patch) error {
+ // Do --dry-run first to not mess with partially consistent state.
+ cmd := exec.Command("patch", "-p1", "--force", "--ignore-whitespace", "--dry-run")
+ cmd.Dir = filepath.Join(a.Dir, "linux")
+ cmd.Stdin = bytes.NewReader(p.Diff)
+ if output, err := cmd.CombinedOutput(); err != nil {
+ // If it reverses clean, then it's already applied (seems to be the easiest way to detect it).
+ cmd = exec.Command("patch", "-p1", "--force", "--ignore-whitespace", "--reverse", "--dry-run")
+ cmd.Dir = filepath.Join(a.Dir, "linux")
+ cmd.Stdin = bytes.NewReader(p.Diff)
+ if _, err := cmd.CombinedOutput(); err == nil {
+ Logf(0, "patch already present: %v", p.Title)
+ return nil
+ }
+ // Deliberately non-fatal: a broken patch should not block the build.
+ Logf(0, "patch failed: %v\n%s", p.Title, output)
+ return nil
+ }
+ // Now apply for real.
+ cmd = exec.Command("patch", "-p1", "--force", "--ignore-whitespace")
+ cmd.Dir = filepath.Join(a.Dir, "linux")
+ cmd.Stdin = bytes.NewReader(p.Diff)
+ if output, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("patch '%v' failed after dry run:\n%s", p.Title, output)
+ }
+ Logf(0, "patch applied: %v", p.Title)
+ return nil
+}
+
+// GCSImageAction tracks a prebuilt image archive stored in GCS and turns it
+// into a GCE image when it changes.
+type GCSImageAction struct {
+ ImageArchive string // "bucket/object" name of the archive in GCS
+ ImagePath string // GCS path for the uploaded disk image
+ ImageName string // name of the GCE image to (re)create
+
+ // handle is set by Poll and pins the exact object generation for Build.
+ handle *storage.ObjectHandle
+}
+
+// Name identifies this action in logs and in the hash maps of the main loop.
+func (a *GCSImageAction) Name() string {
+ return "GCS image"
+}
+
+// Poll looks up the archive object in GCS and returns its last-update time
+// as the change-detection hash. It also stores a generation-pinned handle so
+// that Build downloads exactly the version that was polled.
+func (a *GCSImageAction) Poll() (string, error) {
+ pos := strings.IndexByte(a.ImageArchive, '/')
+ if pos == -1 {
+ return "", fmt.Errorf("invalid GCS file name: %v", a.ImageArchive)
+ }
+ bkt := storageClient.Bucket(a.ImageArchive[:pos])
+ f := bkt.Object(a.ImageArchive[pos+1:])
+ attrs, err := f.Attrs(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to read %v attributes: %v", a.ImageArchive, err)
+ }
+ if !attrs.Deleted.IsZero() {
+ return "", fmt.Errorf("file %v is deleted", a.ImageArchive)
+ }
+ a.handle = f.If(storage.Conditions{
+ GenerationMatch: attrs.Generation,
+ MetagenerationMatch: attrs.MetaGeneration,
+ })
+ return attrs.Updated.Format(time.RFC1123Z), nil
+}
+
+// Build downloads and extracts the archive pinned by Poll into the image/
+// directory and registers the contained disk as a GCE image.
+func (a *GCSImageAction) Build() error {
+ Logf(0, "downloading image archive...")
+ if err := os.RemoveAll("image"); err != nil {
+ return fmt.Errorf("failed to remove image dir: %v", err)
+ }
+ if err := downloadAndExtract(a.handle, "image"); err != nil {
+ return fmt.Errorf("failed to download and extract %v: %v", a.ImageArchive, err)
+ }
+ if err := createImage("image/disk.tar.gz", a.ImagePath, a.ImageName); err != nil {
+ return err
+ }
+ return nil
+}
+
func readConfig(filename string) *Config {
if filename == "" {
Fatalf("supply config in -config flag")
@@ -340,7 +488,7 @@ func readConfig(filename string) *Config {
return cfg
}
-func writeManagerConfig(httpPort int, file string) error {
+func writeManagerConfig(cfg *Config, httpPort int, file string) error {
tag, err := ioutil.ReadFile("image/tag")
if err != nil {
return fmt.Errorf("failed to read tag file: %v", err)
@@ -349,22 +497,24 @@ func writeManagerConfig(httpPort int, file string) error {
tag = tag[:len(tag)-1]
}
managerCfg := &config.Config{
- Name: cfg.Name,
- Hub_Addr: cfg.Hub_Addr,
- Hub_Key: cfg.Hub_Key,
- Http: fmt.Sprintf(":%v", httpPort),
- Rpc: ":0",
- Workdir: "workdir",
- Vmlinux: "image/obj/vmlinux",
- Tag: string(tag),
- Syzkaller: "gopath/src/github.com/google/syzkaller",
- Type: "gce",
- Machine_Type: cfg.Machine_Type,
- Count: cfg.Machine_Count,
- Image: cfg.Image_Name,
- Sandbox: cfg.Sandbox,
- Procs: cfg.Procs,
- Cover: true,
+ Name: cfg.Name,
+ Hub_Addr: cfg.Hub_Addr,
+ Hub_Key: cfg.Hub_Key,
+ Dashboard_Addr: cfg.Dashboard_Addr,
+ Dashboard_Key: cfg.Dashboard_Key,
+ Http: fmt.Sprintf(":%v", httpPort),
+ Rpc: ":0",
+ Workdir: "workdir",
+ Vmlinux: "image/obj/vmlinux",
+ Tag: string(tag),
+ Syzkaller: "gopath/src/github.com/google/syzkaller",
+ Type: "gce",
+ Machine_Type: cfg.Machine_Type,
+ Count: cfg.Machine_Count,
+ Image: cfg.Image_Name,
+ Sandbox: cfg.Sandbox,
+ Procs: cfg.Procs,
+ Cover: true,
}
if _, err := os.Stat("image/key"); err == nil {
managerCfg.Sshkey = "image/key"
@@ -389,27 +539,6 @@ func chooseUnusedPort() (int, error) {
return port, nil
}
-func openFile(file string) (*storage.ObjectHandle, time.Time, error) {
- pos := strings.IndexByte(file, '/')
- if pos == -1 {
- return nil, time.Time{}, fmt.Errorf("invalid GCS file name: %v", file)
- }
- bkt := storageClient.Bucket(file[:pos])
- f := bkt.Object(file[pos+1:])
- attrs, err := f.Attrs(ctx)
- if err != nil {
- return nil, time.Time{}, fmt.Errorf("failed to read %v attributes: %v", file, err)
- }
- if !attrs.Deleted.IsZero() {
- return nil, time.Time{}, fmt.Errorf("file %v is deleted", file)
- }
- f = f.If(storage.Conditions{
- GenerationMatch: attrs.Generation,
- MetagenerationMatch: attrs.MetaGeneration,
- })
- return f, attrs.Updated, nil
-}
-
func downloadAndExtract(f *storage.ObjectHandle, dir string) error {
r, err := f.NewReader(ctx)
if err != nil {
@@ -457,7 +586,22 @@ func downloadAndExtract(f *storage.ObjectHandle, dir string) error {
return nil
}
-func uploadFile(localFile string, gcsFile string) error {
+// createImage uploads localFile to gcsFile and recreates the GCE image
+// imageName from it (the old image is deleted first, since GCE images
+// cannot be overwritten in place).
+func createImage(localFile, gcsFile, imageName string) error {
+ Logf(0, "uploading image...")
+ if err := uploadFile(localFile, gcsFile); err != nil {
+ return fmt.Errorf("failed to upload image: %v", err)
+ }
+ Logf(0, "creating gce image...")
+ if err := GCE.DeleteImage(imageName); err != nil {
+ return fmt.Errorf("failed to delete GCE image: %v", err)
+ }
+ if err := GCE.CreateImage(imageName, gcsFile); err != nil {
+ return fmt.Errorf("failed to create GCE image: %v", err)
+ }
+ return nil
+}
+
+func uploadFile(localFile, gcsFile string) error {
local, err := os.Open(localFile)
if err != nil {
return err
@@ -475,39 +619,6 @@ func uploadFile(localFile string, gcsFile string) error {
return nil
}
-// updateSyzkallerBuild executes 'git pull' on syzkaller and all depenent packages.
-// Returns syzkaller HEAD hash.
-func updateSyzkallerBuild() (string, error) {
- cmd := exec.Command("go", "get", "-u", "-d", "github.com/google/syzkaller/syz-manager")
- if output, err := cmd.CombinedOutput(); err != nil {
- return "", fmt.Errorf("%v\n%s", err, output)
- }
- return gitRevision("gopath/src/github.com/google/syzkaller")
-}
-
-func gitUpdate(dir, repo, branch string) (string, error) {
- if _, err := runCmd(dir, "git", "pull"); err != nil {
- if err := os.RemoveAll(dir); err != nil {
- return "", fmt.Errorf("failed to remove repo dir: %v", err)
- }
- if err := os.MkdirAll(dir, 0700); err != nil {
- return "", fmt.Errorf("failed to create repo dir: %v", err)
- }
- if _, err := runCmd("", "git", "clone", repo, dir); err != nil {
- return "", err
- }
- if _, err := runCmd(dir, "git", "pull"); err != nil {
- return "", err
- }
- }
- if branch != "" {
- if _, err := runCmd(dir, "git", "checkout", branch); err != nil {
- return "", err
- }
- }
- return gitRevision(dir)
-}
-
func gitRevision(dir string) (string, error) {
output, err := runCmd(dir, "git", "log", "--pretty=format:'%H'", "-n", "1")
if err != nil {
diff --git a/tools/syz-dashtool/dashtool.go b/tools/syz-dashtool/dashtool.go
new file mode 100644
index 000000000..d3d37c7b6
--- /dev/null
+++ b/tools/syz-dashtool/dashtool.go
@@ -0,0 +1,121 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// syz-dashtool allows uploading a single crash, or all crashes in a workdir,
+// to a dashboard for testing.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/google/syzkaller/dashboard"
+)
+
+var (
+ flagAddr = flag.String("addr", "", "dashboard address")
+ flagClient = flag.String("client", "", "dashboard client")
+ flagKey = flag.String("key", "", "dashboard key")
+)
+
+// main parses flags, constructs the dashboard client and dispatches to one
+// of two subcommands: "report" uploads a single crash log, "report-all"
+// uploads every crash found under a workdir/crashes directory.
+func main() {
+ flag.Parse()
+ if *flagAddr == "" || *flagClient == "" || *flagKey == "" {
+ fmt.Fprintf(os.Stderr, "addr/client/key flags are mandatory\n")
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+ dash := &dashboard.Dashboard{
+ Addr: *flagAddr,
+ Client: *flagClient,
+ Key: *flagKey,
+ }
+ if len(flag.Args()) == 0 {
+ fmt.Fprintf(os.Stderr, "specify command: report, report-all\n")
+ os.Exit(1)
+ }
+ switch flag.Args()[0] {
+ case "report":
+ if len(flag.Args()) != 2 {
+ fmt.Fprintf(os.Stderr, "usage: report logN\n")
+ os.Exit(1)
+ }
+ report(dash, flag.Args()[1])
+ case "report-all":
+ if len(flag.Args()) != 2 {
+ fmt.Fprintf(os.Stderr, "usage: report-all workdir/crashes\n")
+ os.Exit(1)
+ }
+ reportAll(dash, flag.Args()[1])
+ default:
+ fmt.Fprintf(os.Stderr, "unknown command: %v\n", flag.Args()[0])
+ os.Exit(1)
+ }
+}
+
+// report uploads a single crash to the dashboard. logfile must be named
+// "log<N>"; the numeric suffix N selects the matching sibling files
+// tag<N> and report<N>, and a "description" file is read from the same
+// directory. tag/report files are optional; missing log or description
+// is fatal. Exits the process with status 1 on any error.
+func report(dash *dashboard.Dashboard, logfile string) {
+ // Extract the numeric suffix N from the "log<N>" file name.
+ n := -1
+ for i := range logfile {
+ x, err := strconv.Atoi(logfile[i:])
+ if err == nil {
+ n = x
+ break
+ }
+ }
+ if n == -1 {
+ fmt.Fprintf(os.Stderr, "bad log file name\n")
+ os.Exit(1)
+ }
+ dir := filepath.Dir(logfile)
+
+ log, err := ioutil.ReadFile(logfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to read log file: %v\n", err)
+ os.Exit(1)
+ }
+ desc, err := ioutil.ReadFile(filepath.Join(dir, "description"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to read description file: %v\n", err)
+ os.Exit(1)
+ }
+ // tag/report are best-effort: absent files yield empty values.
+ tag, _ := ioutil.ReadFile(filepath.Join(dir, fmt.Sprintf("tag%v", n)))
+ report, _ := ioutil.ReadFile(filepath.Join(dir, fmt.Sprintf("report%v", n)))
+
+ crash := &dashboard.Crash{
+ Tag: string(tag),
+ Desc: string(desc),
+ Log: log,
+ Report: report,
+ }
+
+ if err := dash.ReportCrash(crash); err != nil {
+ fmt.Fprintf(os.Stderr, "failed: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+// reportAll walks every per-crash subdirectory of the given crashes
+// directory and uploads each "log*" file it finds via report. Exits the
+// process with status 1 if a directory cannot be read.
+func reportAll(dash *dashboard.Dashboard, crashes string) {
+ dirs, err := ioutil.ReadDir(crashes)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to read crashes dir: %v\n", err)
+ os.Exit(1)
+ }
+ for _, dir := range dirs {
+ files, err := ioutil.ReadDir(filepath.Join(crashes, dir.Name()))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to read crashes dir: %v\n", err)
+ os.Exit(1)
+ }
+ for _, file := range files {
+ if !strings.HasPrefix(file.Name(), "log") {
+ continue
+ }
+ report(dash, filepath.Join(crashes, dir.Name(), file.Name()))
+ }
+ }
+}