aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2017-06-22 16:28:04 +0200
committerDmitry Vyukov <dvyukov@google.com>2017-08-02 15:11:03 +0200
commit61d1beb284931d6cc182b60868626f5e2ed819e4 (patch)
treedf069085f3eb2bf8864dbd6cf424f2da558022a2
parent9c3074429739f4c3a2ee6155f677d6444a51109d (diff)
dashboard/app: new dashboard app
The new app is based on our experience with syz-dash and is meant to supersede it. This app aims at full automation of bug lifecycle: reporting, tracking updates, closing fixed bugs. The main differences are: - this app has support for reporting bugs either by email or using an arbitrary external reporting system - this app tracks status of bugs - this app captures more info about kernel builds
-rw-r--r--dashboard/app/api.go585
-rw-r--r--dashboard/app/app.yaml30
-rw-r--r--dashboard/app/app_test.go181
-rw-r--r--dashboard/app/bug.html42
-rw-r--r--dashboard/app/common.html17
-rw-r--r--dashboard/app/config.go149
-rw-r--r--dashboard/app/config_stub.go37
-rw-r--r--dashboard/app/cron.yaml3
-rw-r--r--dashboard/app/entities.go167
-rw-r--r--dashboard/app/error.html12
-rw-r--r--dashboard/app/fix_test.go415
-rw-r--r--dashboard/app/handler.go86
-rw-r--r--dashboard/app/index.yaml41
-rw-r--r--dashboard/app/mail_bug.txt10
-rw-r--r--dashboard/app/main.go272
-rw-r--r--dashboard/app/main.html44
-rw-r--r--dashboard/app/reporting.go529
-rw-r--r--dashboard/app/reporting_email.go200
-rw-r--r--dashboard/app/reporting_external.go49
-rw-r--r--dashboard/app/reporting_test.go386
-rw-r--r--dashboard/app/static/favicon.ico0
-rw-r--r--dashboard/app/static/style.css122
-rw-r--r--dashboard/app/util_test.go196
23 files changed, 3573 insertions, 0 deletions
diff --git a/dashboard/app/api.go b/dashboard/app/api.go
new file mode 100644
index 000000000..e25767469
--- /dev/null
+++ b/dashboard/app/api.go
@@ -0,0 +1,585 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "github.com/google/syzkaller/pkg/hash"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+// init registers the single API endpoint; all API methods are
+// multiplexed through /api via the "method" form value.
+func init() {
+ http.Handle("/api", handleJSON(handleAPI))
+}
+
+// apiHandlers maps the "method" form value of an /api request to its
+// implementation. Every handler receives the namespace resolved from
+// the client credentials (empty for global clients).
+var apiHandlers = map[string]APIHandler{
+ "log_error": apiLogError,
+ "upload_build": apiUploadBuild,
+ "builder_poll": apiBuilderPoll,
+ "report_crash": apiReportCrash,
+ "report_failed_repro": apiReportFailedRepro,
+ "reporting_poll": apiReportingPoll,
+ "reporting_update": apiReportingUpdate,
+}
+
+// JSONHandler is a request handler that returns a JSON-serializable reply.
+type JSONHandler func(c context.Context, r *http.Request) (interface{}, error)
+// APIHandler additionally receives the namespace of the authenticated client.
+type APIHandler func(c context.Context, ns string, r *http.Request) (interface{}, error)
+
+// Overridable for testing.
+var timeNow = func(c context.Context) time.Time {
+ return time.Now()
+}
+
+// timeSince returns the duration elapsed since t according to timeNow,
+// so tests overriding timeNow observe consistent values.
+func timeSince(c context.Context, t time.Time) time.Duration {
+ return timeNow(c).Sub(t)
+}
+
+// handleJSON adapts a JSONHandler to http.Handler: it creates an
+// appengine context, runs fn, maps errors to HTTP 500 (after logging
+// the details) and writes the reply as JSON, gzip-compressed when the
+// client advertises support via Accept-Encoding.
+func handleJSON(fn JSONHandler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ reply, err := fn(c, r)
+ if err != nil {
+ log.Errorf(c, "%v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ w.Header().Set("Content-Encoding", "gzip")
+ gz := gzip.NewWriter(w)
+ // NOTE(review): Encode/Close errors are ignored here and below;
+ // a failure leaves the client with a truncated body.
+ json.NewEncoder(gz).Encode(reply)
+ gz.Close()
+ } else {
+ json.NewEncoder(w).Encode(reply)
+ }
+ })
+}
+
+// handleAPI authenticates the calling client and dispatches the request
+// to the handler registered in apiHandlers for the "method" form value.
+func handleAPI(c context.Context, r *http.Request) (reply interface{}, err error) {
+ ns, err := checkClient(c, r.FormValue("client"), r.FormValue("key"))
+ if err != nil {
+ // Log the detailed reason but return an opaque error, so callers
+ // cannot probe for valid client names/keys.
+ log.Warningf(c, "%v", err)
+ return nil, fmt.Errorf("unauthorized request")
+ }
+ method := r.FormValue("method")
+ handler := apiHandlers[method]
+ if handler == nil {
+ return nil, fmt.Errorf("unknown api method %q", method)
+ }
+ return handler(c, ns, r)
+}
+
+// checkClient validates the client name/key pair against the config and
+// returns the namespace the client is confined to. Global clients
+// (config.Clients) yield an empty namespace, meaning they act across
+// namespaces. A matching name with a wrong key is an error.
+func checkClient(c context.Context, name0, key0 string) (string, error) {
+ for name, key := range config.Clients {
+ if name == name0 {
+ if key != key0 {
+ return "", fmt.Errorf("wrong client %q key", name0)
+ }
+ return "", nil
+ }
+ }
+ for ns, cfg := range config.Namespaces {
+ for name, key := range cfg.Clients {
+ if name == name0 {
+ if key != key0 {
+ return "", fmt.Errorf("wrong client %q key", name0)
+ }
+ return ns, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("unauthorized api request from %q", name0)
+}
+
+// apiLogError records an error reported by an external syzkaller
+// process into the app's error log (so it shows up in app monitoring).
+func apiLogError(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.LogEntry)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ log.Errorf(c, "%v: %v", req.Name, req.Text)
+ return nil, nil
+}
+
+// apiBuilderPoll returns the sorted, deduplicated list of fix commits
+// that are still pending for the requesting manager, i.e. commits
+// attached to not-yet-fixed bugs that have not been observed as patched
+// on that manager.
+func apiBuilderPoll(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.BuilderPollReq)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ var bugs []*Bug
+ _, err := datastore.NewQuery("Bug").
+ Filter("Namespace=", ns).
+ Filter("Status<", BugStatusFixed).
+ GetAll(c, &bugs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to query bugs: %v", err)
+ }
+ m := make(map[string]bool)
+loop:
+ for _, bug := range bugs {
+ // TODO(dvyukov): include this condition into the query if possible.
+ if len(bug.Commits) == 0 {
+ continue
+ }
+ // Skip bugs already known to be patched on this manager.
+ for _, mgr := range bug.PatchedOn {
+ if mgr == req.Manager {
+ continue loop
+ }
+ }
+ for _, com := range bug.Commits {
+ m[com] = true
+ }
+ }
+ commits := make([]string, 0, len(m))
+ for com := range m {
+ commits = append(commits, com)
+ }
+ sort.Strings(commits)
+ resp := &dashapi.BuilderPollResp{
+ PendingCommits: commits,
+ }
+ return resp, nil
+}
+
+// apiUploadBuild stores information about a new kernel build (keyed by
+// the manager-generated build ID), persists the kernel config as a
+// deduplicated text entity, and, if the build contains fix commits,
+// propagates them onto open bugs via addCommitsToBugs.
+func apiUploadBuild(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.Build)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ // All string fields must be non-empty and bounded, since they are
+ // stored in datastore and rendered in UI.
+ checkStrLen := func(str, name string, maxLen int) error {
+ if str == "" {
+ return fmt.Errorf("%v is empty", name)
+ }
+ if len(str) > maxLen {
+ return fmt.Errorf("%v is too long (%v)", name, len(str))
+ }
+ return nil
+ }
+ if err := checkStrLen(req.Manager, "Build.Manager", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.ID, "Build.ID", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.KernelRepo, "Build.KernelRepo", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.KernelBranch, "Build.KernelBranch", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.SyzkallerCommit, "Build.SyzkallerCommit", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.CompilerID, "Build.CompilerID", MaxStringLen); err != nil {
+ return nil, err
+ }
+ if err := checkStrLen(req.KernelCommit, "Build.KernelCommit", MaxStringLen); err != nil {
+ return nil, err
+ }
+ // Kernel configs are identical across many builds, so dedup them.
+ configID, err := putText(c, ns, "KernelConfig", req.KernelConfig, true)
+ if err != nil {
+ return nil, err
+ }
+ build := &Build{
+ Namespace: ns,
+ Manager: req.Manager,
+ ID: req.ID,
+ SyzkallerCommit: req.SyzkallerCommit,
+ CompilerID: req.CompilerID,
+ KernelRepo: req.KernelRepo,
+ KernelBranch: req.KernelBranch,
+ KernelCommit: req.KernelCommit,
+ KernelConfig: configID,
+ }
+ // Re-uploading the same build ID simply overwrites the entity.
+ if _, err := datastore.Put(c, buildKey(c, ns, req.ID), build); err != nil {
+ return nil, err
+ }
+
+ if len(req.Commits) != 0 {
+ if err := addCommitsToBugs(c, ns, req.Manager, req.Commits); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// addCommitsToBugs marks bugs whose fixing commits are all present in
+// the given build's commit list as patched on the given manager.
+// When a bug becomes patched on every known manager in the namespace,
+// it is transitioned to BugStatusFixed and its Closed time is set.
+func addCommitsToBugs(c context.Context, ns, manager string, commits []string) error {
+ commitMap := make(map[string]bool)
+ for _, com := range commits {
+ commitMap[com] = true
+ }
+ managers, err := managerList(c, ns)
+ if err != nil {
+ return err
+ }
+ var bugs []*Bug
+ keys, err := datastore.NewQuery("Bug").
+ Filter("Namespace=", ns).
+ Filter("Status<", BugStatusFixed).
+ GetAll(c, &bugs)
+ if err != nil {
+ return fmt.Errorf("failed to query bugs: %v", err)
+ }
+ now := timeNow(c)
+ for i, bug := range bugs {
+ // Cheap pre-filter on the (possibly stale) query result.
+ if !fixedWith(bug, manager, commitMap) {
+ continue
+ }
+ tx := func(c context.Context) error {
+ bug := new(Bug)
+ if err := datastore.Get(c, keys[i], bug); err != nil {
+ return fmt.Errorf("failed to get bug %v: %v", keys[i].StringID(), err)
+ }
+ // Re-check on the fresh copy inside the transaction to avoid
+ // double-appending the manager on a race.
+ if !fixedWith(bug, manager, commitMap) {
+ return nil
+ }
+ bug.PatchedOn = append(bug.PatchedOn, manager)
+ if bug.Status == BugStatusOpen {
+ fixed := true
+ for _, mgr := range managers {
+ if !stringInList(bug.PatchedOn, mgr) {
+ fixed = false
+ break
+ }
+ }
+ if fixed {
+ bug.Status = BugStatusFixed
+ bug.Closed = now
+ }
+ }
+ if _, err := datastore.Put(c, keys[i], bug); err != nil {
+ return fmt.Errorf("failed to put bug: %v", err)
+ }
+ return nil
+ }
+ if err := datastore.RunInTransaction(c, tx, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// managerList returns the distinct manager names that have uploaded at
+// least one build in the namespace (derived from Build entities via a
+// projection query).
+func managerList(c context.Context, ns string) ([]string, error) {
+ var builds []*Build
+ _, err := datastore.NewQuery("Build").
+ Filter("Namespace=", ns).
+ Project("Manager").
+ Distinct().
+ GetAll(c, &builds)
+ if err != nil {
+ return nil, fmt.Errorf("failed to query builds: %v", err)
+ }
+ var managers []string
+ for _, build := range builds {
+ managers = append(managers, build.Manager)
+ }
+ return managers, nil
+}
+
+// fixedWith reports whether the bug should be considered patched on the
+// given manager: the bug has at least one fixing commit, all of its
+// commits are present in the build (commits set), and the manager is
+// not already recorded in PatchedOn.
+func fixedWith(bug *Bug, manager string, commits map[string]bool) bool {
+ if stringInList(bug.PatchedOn, manager) {
+ return false
+ }
+ for _, com := range bug.Commits {
+ if !commits[com] {
+ return false
+ }
+ }
+ return len(bug.Commits) > 0
+}
+
+// stringInList reports whether str is present in list (linear scan).
+func stringInList(list []string, str string) bool {
+ for _, s := range list {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+// apiReportCrash records a new crash: it stores the crash texts
+// (log/report/repros) as separate entities, then transactionally finds
+// or creates the Bug keyed by (namespace, title, seq) and updates its
+// counters, repro level and timestamps. A Crash child entity is added
+// under the bug. Old crashes are purged best-effort afterwards.
+func apiReportCrash(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.Crash)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ req.Title = limitLength(req.Title, maxTextLen)
+ if len(req.Maintainers) > maxMaintainers {
+ req.Maintainers = req.Maintainers[:maxMaintainers]
+ }
+
+ build, err := loadBuild(c, ns, req.BuildID)
+ if err != nil {
+ return nil, err
+ }
+
+ crash := &Crash{
+ Manager: build.Manager,
+ BuildID: req.BuildID,
+ Time: timeNow(c),
+ Maintainers: req.Maintainers,
+ ReproOpts: req.ReproOpts,
+ ReportLen: len(req.Report),
+ }
+
+ // Texts are stored outside of the transaction below; on a later
+ // failure they become unreferenced, which is acceptable garbage.
+ if crash.Log, err = putText(c, ns, "CrashLog", req.Log, false); err != nil {
+ return nil, err
+ }
+ if crash.Report, err = putText(c, ns, "CrashReport", req.Report, false); err != nil {
+ return nil, err
+ }
+ if crash.ReproSyz, err = putText(c, ns, "ReproSyz", req.ReproSyz, false); err != nil {
+ return nil, err
+ }
+ if crash.ReproC, err = putText(c, ns, "ReproC", req.ReproC, false); err != nil {
+ return nil, err
+ }
+
+ bug := new(Bug)
+ var bugKey *datastore.Key
+
+ tx := func(c context.Context) error {
+ // Walk the chain of bugs with the same title (Seq disambiguates
+ // re-occurrences): create a new bug if none exists, otherwise keep
+ // going until we find one whose canonical bug is still open.
+ for seq := int64(0); ; seq++ {
+ bugHash := bugKeyHash(ns, req.Title, seq)
+ bugKey = datastore.NewKey(c, "Bug", bugHash, 0, nil)
+ if err := datastore.Get(c, bugKey, bug); err != nil {
+ if err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("failed to get bug: %v", err)
+ }
+ bug = &Bug{
+ Namespace: ns,
+ Seq: seq,
+ Title: req.Title,
+ Status: BugStatusOpen,
+ NumCrashes: 0,
+ NumRepro: 0,
+ ReproLevel: ReproLevelNone,
+ HasReport: false,
+ FirstTime: crash.Time,
+ LastTime: crash.Time,
+ }
+ // Pre-create per-stage reporting state with stable external IDs.
+ for _, rep := range config.Namespaces[ns].Reporting {
+ bug.Reporting = append(bug.Reporting, BugReporting{
+ Name: rep.Name,
+ ID: bugReportingHash(bugHash, rep.Name),
+ })
+ }
+ break
+ }
+ canon, err := canonicalBug(c, bug)
+ if err != nil {
+ return err
+ }
+ if canon.Status == BugStatusOpen {
+ break
+ }
+ }
+
+ bug.NumCrashes++
+ bug.LastTime = crash.Time
+ repro := ReproLevelNone
+ if crash.ReproC != 0 {
+ repro = ReproLevelC
+ } else if crash.ReproSyz != 0 {
+ repro = ReproLevelSyz
+ }
+ if repro != ReproLevelNone {
+ bug.NumRepro++
+ }
+ if bug.ReproLevel < repro {
+ bug.ReproLevel = repro
+ }
+ if crash.Report != 0 {
+ bug.HasReport = true
+ }
+ if bugKey, err = datastore.Put(c, bugKey, bug); err != nil {
+ return fmt.Errorf("failed to put bug: %v", err)
+ }
+
+ crashKey := datastore.NewIncompleteKey(c, "Crash", bugKey)
+ if _, err = datastore.Put(c, crashKey, crash); err != nil {
+ return fmt.Errorf("failed to put crash: %v", err)
+ }
+ return nil
+ }
+ // NOTE(review): XG (cross-group) transaction, presumably because
+ // canonicalBug may read a bug in another entity group — confirm.
+ if err := datastore.RunInTransaction(c, tx, &datastore.TransactionOptions{XG: true}); err != nil {
+ return nil, err
+ }
+ // Best-effort cleanup; errors are only logged inside.
+ purgeOldCrashes(c, bug, bugKey)
+ return nil, nil
+}
+
+// purgeOldCrashes deletes the least useful crashes of a bug once it has
+// accumulated more than maxCrashes of them, along with their text
+// entities. Crashes with reproducers are never considered. All failures
+// are logged and swallowed: purging is best-effort.
+func purgeOldCrashes(c context.Context, bug *Bug, bugKey *datastore.Key) {
+ if bug.NumCrashes <= maxCrashes {
+ return
+ }
+ var crashes []*Crash
+ // Order by Report, then Time: crashes without a report (Report == 0)
+ // and the oldest ones sort first and are purged first.
+ keys, err := datastore.NewQuery("Crash").
+ Ancestor(bugKey).
+ Filter("ReproC=", 0).
+ Filter("ReproSyz=", 0).
+ Order("Report").
+ Order("Time").
+ Limit(maxCrashes+100).
+ GetAll(c, &crashes)
+ if err != nil {
+ log.Errorf(c, "failed to fetch purge crashes: %v", err)
+ return
+ }
+ if len(keys) <= maxCrashes {
+ return
+ }
+ // Keep the last maxCrashes entries, delete the rest.
+ keys = keys[:len(keys)-maxCrashes]
+ crashes = crashes[:len(crashes)-maxCrashes]
+ var texts []*datastore.Key
+ for _, crash := range crashes {
+ // Defense in depth: the query should already exclude these.
+ if crash.ReproSyz != 0 || crash.ReproC != 0 {
+ log.Errorf(c, "purging reproducer?")
+ continue
+ }
+ if crash.Log != 0 {
+ texts = append(texts, datastore.NewKey(c, "CrashLog", "", crash.Log, nil))
+ }
+ if crash.Report != 0 {
+ texts = append(texts, datastore.NewKey(c, "CrashReport", "", crash.Report, nil))
+ }
+ }
+ if len(texts) != 0 {
+ if err := datastore.DeleteMulti(c, texts); err != nil {
+ log.Errorf(c, "failed to delete old crash texts: %v", err)
+ return
+ }
+ }
+ if err := datastore.DeleteMulti(c, keys); err != nil {
+ log.Errorf(c, "failed to delete old crashes: %v", err)
+ return
+ }
+ log.Infof(c, "deleted %v crashes", len(keys))
+}
+
+// apiReportFailedRepro records a failed reproduction attempt by bumping
+// NumRepro of the matching open (or dup) bug. Unlike report_crash, the
+// bug is expected to exist already: a missing bug is a hard error.
+func apiReportFailedRepro(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.FailedRepro)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ req.Title = limitLength(req.Title, maxTextLen)
+
+ tx := func(c context.Context) error {
+ var bugKey *datastore.Key
+ bug := new(Bug)
+ // Walk the seq chain until an open/dup bug with this title is found.
+ for seq := int64(0); ; seq++ {
+ bugHash := bugKeyHash(ns, req.Title, seq)
+ bugKey = datastore.NewKey(c, "Bug", bugHash, 0, nil)
+ if err := datastore.Get(c, bugKey, bug); err != nil {
+ return fmt.Errorf("failed to get bug: %v", err)
+ }
+ if bug.Status == BugStatusOpen || bug.Status == BugStatusDup {
+ break
+ }
+ }
+
+ bug.NumRepro++
+ if _, err := datastore.Put(c, bugKey, bug); err != nil {
+ return fmt.Errorf("failed to put bug: %v", err)
+ }
+ return nil
+ }
+ if err := datastore.RunInTransaction(c, tx, nil); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+// putText gzip-compresses data and stores it as an entity of the given
+// kind (tag), returning the entity's int64 ID (0 for empty data, which
+// is a valid "no text" value). If dedup is set, the key is derived from
+// a content hash so identical blobs are stored only once (used for
+// kernel configs); otherwise an auto-generated key is used.
+func putText(c context.Context, ns, tag string, data []byte, dedup bool) (int64, error) {
+ if ns == "" {
+ return 0, fmt.Errorf("putting text outside of namespace")
+ }
+ if len(data) == 0 {
+ return 0, nil
+ }
+ const (
+ maxTextLen = 2 << 20
+ maxCompressedLen = 1000 << 10 // datastore entity limit is 1MB
+ )
+ if len(data) > maxTextLen {
+ data = data[:maxTextLen]
+ }
+ b := new(bytes.Buffer)
+ // Shrink the input in 10% steps until the compressed form fits
+ // under the datastore entity size limit.
+ for {
+ z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
+ z.Write(data)
+ z.Close()
+ if len(b.Bytes()) < maxCompressedLen {
+ break
+ }
+ data = data[:len(data)/10*9]
+ b.Reset()
+ }
+ var key *datastore.Key
+ if dedup {
+ // Hash is salted with the namespace so identical blobs in
+ // different namespaces get distinct entities.
+ h := hash.Hash([]byte(ns), b.Bytes())
+ key = datastore.NewKey(c, tag, "", h.Truncate64(), nil)
+ } else {
+ key = datastore.NewIncompleteKey(c, tag, nil)
+ }
+ text := &Text{
+ Namespace: ns,
+ Text: b.Bytes(),
+ }
+ key, err := datastore.Put(c, key, text)
+ if err != nil {
+ return 0, err
+ }
+ return key.IntID(), nil
+}
+
+// getText loads and decompresses a text entity previously stored with
+// putText. id == 0 means "no text" and yields nil without error.
+func getText(c context.Context, tag string, id int64) ([]byte, error) {
+ if id == 0 {
+ return nil, nil
+ }
+ text := new(Text)
+ if err := datastore.Get(c, datastore.NewKey(c, tag, "", id, nil), text); err != nil {
+ return nil, fmt.Errorf("failed to read text %v: %v", tag, err)
+ }
+ d, err := gzip.NewReader(bytes.NewBuffer(text.Text))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read text %v: %v", tag, err)
+ }
+ data, err := ioutil.ReadAll(d)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read text %v: %v", tag, err)
+ }
+ return data, nil
+}
+
+// limitLength essentially does return s[:max],
+// but it ensures that we do not split a UTF-8 rune in half.
+// Otherwise appengine python scripts will break badly.
+func limitLength(s string, max int) string {
+ s = strings.TrimSpace(s)
+ if len(s) <= max {
+ return s
+ }
+ for {
+ s = s[:max]
+ // A RuneError of size 1 means the cut landed inside a multi-byte
+ // rune; back off one byte and retry.
+ r, size := utf8.DecodeLastRuneInString(s)
+ if r != utf8.RuneError || size != 1 {
+ return s
+ }
+ max--
+ }
+}
diff --git a/dashboard/app/app.yaml b/dashboard/app/app.yaml
new file mode 100644
index 000000000..7b7715de6
--- /dev/null
+++ b/dashboard/app/app.yaml
@@ -0,0 +1,30 @@
+application: syzkaller
+version: 1
+runtime: go
+api_version: go1
+
+inbound_services:
+- mail
+
+handlers:
+- url: /favicon.ico
+ static_files: static/favicon.ico
+ upload: static/favicon.ico
+ secure: always
+- url: /static
+ static_dir: static
+ secure: always
+- url: /(|bug|text)
+ script: _go_app
+ login: required
+ secure: always
+- url: /(api)
+ script: _go_app
+ secure: always
+- url: /(email_poll)
+ script: _go_app
+ login: admin
+ secure: always
+- url: /_ah/mail/.+
+ script: _go_app
+ login: admin
diff --git a/dashboard/app/app_test.go b/dashboard/app/app_test.go
new file mode 100644
index 000000000..4ddd3fb7b
--- /dev/null
+++ b/dashboard/app/app_test.go
@@ -0,0 +1,181 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build aetest
+
+package dash
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+)
+
+// Config used in tests.
+var config = GlobalConfig{
+ AuthDomain: "@foo.com",
+ Clients: map[string]string{
+ "reporting": "reportingkeyreportingkeyreportingkey",
+ },
+ Namespaces: map[string]*Config{
+ "test1": &Config{
+ Key: "test1keytest1keytest1key",
+ Clients: map[string]string{
+ client1: key1,
+ },
+ Reporting: []Reporting{
+ {
+ Name: "reporting1",
+ DailyLimit: 3,
+ Config: &TestConfig{
+ Index: 1,
+ },
+ },
+ {
+ Name: "reporting2",
+ DailyLimit: 3,
+ Config: &TestConfig{
+ Index: 2,
+ },
+ },
+ },
+ },
+ "test2": &Config{
+ Key: "test2keytest2keytest2key",
+ Clients: map[string]string{
+ client2: key2,
+ },
+ Reporting: []Reporting{
+ {
+ Name: "reporting1",
+ DailyLimit: 3,
+ Config: &TestConfig{},
+ },
+ },
+ },
+ },
+}
+
+const (
+ client1 = "client1"
+ client2 = "client2"
+ key1 = "client1keyclient1keyclient1key"
+ key2 = "client2keyclient2keyclient2key"
+)
+
+type TestConfig struct {
+ Index int
+}
+
+func (cfg *TestConfig) Type() string {
+ return "test"
+}
+
+func (cfg *TestConfig) Validate() error {
+ return nil
+}
+
+// testBuild returns a deterministic Build fixture whose fields are all
+// derived from id.
+func testBuild(id int) *dashapi.Build {
+ return &dashapi.Build{
+ Manager: fmt.Sprintf("manager%v", id),
+ ID: fmt.Sprintf("build%v", id),
+ SyzkallerCommit: fmt.Sprintf("syzkaller_commit%v", id),
+ CompilerID: fmt.Sprintf("compiler%v", id),
+ KernelRepo: fmt.Sprintf("repo%v", id),
+ KernelBranch: fmt.Sprintf("branch%v", id),
+ KernelCommit: fmt.Sprintf("kernel_commit%v", id),
+ KernelConfig: []byte(fmt.Sprintf("config%v", id)),
+ }
+}
+
+// testCrash returns a deterministic Crash fixture for the given build,
+// with title/log/report derived from id.
+func testCrash(build *dashapi.Build, id int) *dashapi.Crash {
+ return &dashapi.Crash{
+ BuildID: build.ID,
+ Title: fmt.Sprintf("title%v", id),
+ Log: []byte(fmt.Sprintf("log%v", id)),
+ Report: []byte(fmt.Sprintf("report%v", id)),
+ }
+}
+
+func TestApp(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ c.expectOK(c.GET("/"))
+
+ c.expectFail("unknown api method", c.API(client1, key1, "unsupported_method", nil, nil))
+
+ ent := &dashapi.LogEntry{
+ Name: "name",
+ Text: "text",
+ }
+ c.expectOK(c.API(client1, key1, "log_error", ent, nil))
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+ // Uploading the same build must be OK.
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ // Some bad combinations of client/key.
+ c.expectFail("unauthorized request", c.API(client1, "", "upload_build", build, nil))
+ c.expectFail("unauthorized request", c.API("unknown", key1, "upload_build", build, nil))
+ c.expectFail("unauthorized request", c.API(client1, key2, "upload_build", build, nil))
+
+ crash1 := &dashapi.Crash{
+ BuildID: "build1",
+ Title: "title1",
+ Maintainers: []string{`"Foo Bar" <foo@bar.com>`, `bar@foo.com`},
+ Log: []byte("log1"),
+ Report: []byte("report1"),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ // Test that namespace isolation works.
+ c.expectFail("unknown build", c.API(client2, key2, "report_crash", crash1, nil))
+
+ crash2 := &dashapi.Crash{
+ BuildID: "build1",
+ Title: "title2",
+ Maintainers: []string{`bar@foo.com`},
+ Log: []byte("log2"),
+ Report: []byte("report2"),
+ ReproOpts: []byte("opts"),
+ ReproSyz: []byte("syz repro"),
+ ReproC: []byte("c repro"),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+
+ // Provoke purgeOldCrashes.
+ for i := 0; i < 30; i++ {
+ crash := &dashapi.Crash{
+ BuildID: "build1",
+ Title: "title1",
+ Maintainers: []string{`bar@foo.com`},
+ Log: []byte(fmt.Sprintf("log%v", i)),
+ Report: []byte(fmt.Sprintf("report%v", i)),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash, nil))
+ }
+
+ repro := &dashapi.FailedRepro{
+ Manager: "manager1",
+ BuildID: "build1",
+ Title: "title1",
+ }
+ c.expectOK(c.API(client1, key1, "report_failed_repro", repro, nil))
+
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+
+ cmd := &dashapi.BugUpdate{
+ ID: "id",
+ Status: dashapi.BugStatusOpen,
+ ReproLevel: dashapi.ReproLevelC,
+ DupOf: "",
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, nil))
+}
diff --git a/dashboard/app/bug.html b/dashboard/app/bug.html
new file mode 100644
index 000000000..94e59a90c
--- /dev/null
+++ b/dashboard/app/bug.html
@@ -0,0 +1,42 @@
+<!doctype html>
+<html>
+<head>
+ <title>Syzkaller Dashboard</title>
+ <link rel="stylesheet" href="/static/style.css"/>
+</head>
+<body>
+ {{template "header" .Header}}
+
+ Title: {{.Bug.Title}}<br>
+ Namespace: {{.Bug.Namespace}}<br>
+ Crashes: {{.Bug.NumCrashes}}<br>
+ First: {{formatTime .Bug.FirstTime}}<br>
+ Last: {{formatTime .Bug.LastTime}}<br>
+ Reporting: {{if .Bug.Link}}<a href="{{.Bug.Link}}">{{.Bug.Status}}</a>{{else}}{{.Bug.Status}}{{end}}<br>
+ Commits: {{.Bug.Commits}}<br>
+
+ <table class="list_table">
+ <caption>Crashes:</caption>
+ <tr>
+ <th>Manager</th>
+ <th>Time</th>
+ <th>Log</th>
+ <th>Report</th>
+ <th>Syz repro</th>
+ <th>C repro</th>
+ <th>Maintainers</th>
+ </tr>
+ {{range $c := $.Crashes}}
+ <tr>
+ <td class="manager">{{$c.Manager}}</td>
+ <td class="time">{{formatTime $c.Time}}</td>
+ <td class="repro">{{if $c.LogLink}}<a href="{{$c.LogLink}}">log</a>{{end}}</td>
+ <td class="repro">{{if $c.ReportLink}}<a href="{{$c.ReportLink}}">report</a>{{end}}</td>
+ <td class="repro">{{if $c.ReproSyzLink}}<a href="{{$c.ReproSyzLink}}">syz</a>{{end}}</td>
+ <td class="repro">{{if $c.ReproCLink}}<a href="{{$c.ReproCLink}}">C</a>{{end}}</td>
+ <td class="maintainers" title="{{$c.Maintainers}}">{{$c.Maintainers}}</td>
+ </tr>
+ {{end}}
+ </table>
+</body>
+</html>
diff --git a/dashboard/app/common.html b/dashboard/app/common.html
new file mode 100644
index 000000000..74785b105
--- /dev/null
+++ b/dashboard/app/common.html
@@ -0,0 +1,17 @@
+{{define "header"}}
+ <header id="topbar">
+ <table class="position_table">
+ <tr>
+ <td>
+ <h1><a href="/">syzkaller</a></h1>
+ </td>
+ <td class="search">
+ <a href="https://github.com/google/syzkaller/blob/master/docs/found_bugs.md" target="_blank">found bugs</a> |
+ <a href="https://groups.google.com/forum/#!forum/syzkaller" target="_blank">mailing list</a> |
+ <a href="https://github.com/google/syzkaller" target="_blank">github</a>
+ </td>
+ </tr>
+ </table>
+ </header>
+ <br>
+{{end}}
diff --git a/dashboard/app/config.go b/dashboard/app/config.go
new file mode 100644
index 000000000..a94540d0e
--- /dev/null
+++ b/dashboard/app/config.go
@@ -0,0 +1,149 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// There are multiple configurable aspects of the app (namespaces, reporting, API clients, etc).
+// The exact config is stored in a global config variable and is read-only.
+// Also see config_stub.go.
+type GlobalConfig struct {
+ // Email suffix of authorized users (e.g. "@foobar.com").
+ AuthDomain string
+ // Global API clients that work across namespaces (e.g. external reporting).
+ Clients map[string]string
+ // Per-namespace config.
+ // Namespaces are a mechanism to separate groups of different kernels.
+ // E.g. Debian 4.4 kernels and Ubuntu 4.9 kernels.
+ // Each namespace has own reporting config, own API clients
+ // and bugs are not merged across namespaces.
+ Namespaces map[string]*Config
+}
+
+// Per-namespace config.
+type Config struct {
+ // Per-namespace clients that act only on a particular namespace.
+ Clients map[string]string
+ // A unique key for hashing, can be anything.
+ Key string
+ // Mail bugs without reports (e.g. "no output").
+ MailWithoutReport bool
+ // How long should we wait for a C repro before reporting a bug.
+ WaitForRepro time.Duration
+ // Reporting config.
+ Reporting []Reporting
+}
+
+// One reporting stage.
+type Reporting struct {
+ // A unique name (the app does not care about exact contents).
+ Name string
+ // See ReportingStatus below.
+ Status ReportingStatus
+ // How many new bugs report per day.
+ DailyLimit int
+ // Type of reporting and its configuration.
+ // The app has one built-in type, EmailConfig, which reports bugs by email.
+ // And ExternalConfig which can be used to attach any external reporting system (e.g. Bugzilla).
+ Config ReportingType
+}
+
+type ReportingType interface {
+ // Type returns a unique string that identifies this reporting type (e.g. "email").
+ Type() string
+ // Validate validates the current object, this is called only during init.
+ Validate() error
+}
+
+var (
+ clientNameRe = regexp.MustCompile("^[a-zA-Z0-9-_]{4,100}$")
+ clientKeyRe = regexp.MustCompile("^[a-zA-Z0-9]{16,128}$")
+)
+
+type ReportingStatus int
+
+const (
+ // Send reports to this reporting stage.
+ ReportingActive ReportingStatus = iota
+ // Don't send anything to this reporting, but don't skip it as well.
+ ReportingSuspended
+ // Skip this reporting.
+ ReportingDisabled
+)
+
+// ReportingByName returns the reporting stage with the given name,
+// or nil if the namespace config has no such stage.
+func (cfg *Config) ReportingByName(name string) *Reporting {
+ for i := range cfg.Reporting {
+ reporting := &cfg.Reporting[i]
+ if reporting.Name == name {
+ return reporting
+ }
+ }
+ return nil
+}
+
+// init validates the global config at startup and panics on any
+// inconsistency: the app is useless with a broken config, so failing
+// fast at load time is intentional.
+func init() {
+ // Validate the global config.
+ if len(config.Namespaces) == 0 {
+ panic("no namespaces found")
+ }
+ namespaces := make(map[string]bool)
+ clientNames := make(map[string]bool)
+ // Client names must be unique across global and all per-namespace
+ // client maps; clientNames accumulates the names seen so far.
+ checkClients(clientNames, config.Clients)
+ for ns, cfg := range config.Namespaces {
+ if ns == "" {
+ panic("empty namespace name")
+ }
+ if namespaces[ns] {
+ panic(fmt.Sprintf("duplicate namespace %q", ns))
+ }
+ namespaces[ns] = true
+ checkClients(clientNames, cfg.Clients)
+ if !clientKeyRe.MatchString(cfg.Key) {
+ panic(fmt.Sprintf("bad namespace %q key: %q", ns, cfg.Key))
+ }
+ if len(cfg.Reporting) == 0 {
+ panic(fmt.Sprintf("no reporting in namespace %q", ns))
+ }
+ reportingNames := make(map[string]bool)
+ for _, reporting := range cfg.Reporting {
+ if reporting.Name == "" {
+ panic(fmt.Sprintf("empty reporting name in namespace %q", ns))
+ }
+ if reportingNames[reporting.Name] {
+ panic(fmt.Sprintf("duplicate reporting name %q", reporting.Name))
+ }
+ reportingNames[reporting.Name] = true
+ if reporting.Config.Type() == "" {
+ panic(fmt.Sprintf("empty reporting type for %q", reporting.Name))
+ }
+ if err := reporting.Config.Validate(); err != nil {
+ panic(err)
+ }
+ // Reporting configs are serialized to/from datastore, so they
+ // must be json-marshalable.
+ if _, err := json.Marshal(reporting.Config); err != nil {
+ panic(fmt.Sprintf("failed to json marshal %q config: %v",
+ reporting.Name, err))
+ }
+ }
+ }
+}
+
+// checkClients validates client name/key syntax against clientNameRe/
+// clientKeyRe and enforces global uniqueness of client names, recording
+// every seen name in clientNames. Panics on any violation (called from
+// config validation in init).
+func checkClients(clientNames map[string]bool, clients map[string]string) {
+ for name, key := range clients {
+ if !clientNameRe.MatchString(name) {
+ panic(fmt.Sprintf("bad client name: %v", name))
+ }
+ if !clientKeyRe.MatchString(key) {
+ panic(fmt.Sprintf("bad client key: %v", key))
+ }
+ if clientNames[name] {
+ panic(fmt.Sprintf("duplicate client name: %v", name))
+ }
+ clientNames[name] = true
+ }
+}
diff --git a/dashboard/app/config_stub.go b/dashboard/app/config_stub.go
new file mode 100644
index 000000000..0c68453a8
--- /dev/null
+++ b/dashboard/app/config_stub.go
@@ -0,0 +1,37 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build !aetest
+
+package dash
+
+import "time"
+
+// Stub config variable that merely makes the app link successfully.
+// The app will panic in init with this empty config.
+// When deploying the app one needs to replace this config with a real one.
+// See the example below.
+var config GlobalConfig
+
+// example illustrates the shape of a real config; it is not used by the app.
+var example = GlobalConfig{
+ Namespaces: map[string]*Config{
+ "upstream": &Config{
+ Key: "123",
+ Clients: map[string]string{
+ "foo": "bar",
+ },
+ MailWithoutReport: false,
+ WaitForRepro: 12 * time.Hour,
+ Reporting: []Reporting{
+ Reporting{
+ Name: "upstream",
+ DailyLimit: 10,
+ Config: &EmailConfig{
+ Email: "syzkaller@googlegroups.com",
+ MailMaintainers: true,
+ },
+ },
+ },
+ },
+ },
+}
diff --git a/dashboard/app/cron.yaml b/dashboard/app/cron.yaml
new file mode 100644
index 000000000..fba749cd0
--- /dev/null
+++ b/dashboard/app/cron.yaml
@@ -0,0 +1,3 @@
+cron:
+- url: /email_poll
+ schedule: every 10 minutes
diff --git a/dashboard/app/entities.go b/dashboard/app/entities.go
new file mode 100644
index 000000000..b2a35c16f
--- /dev/null
+++ b/dashboard/app/entities.go
@@ -0,0 +1,167 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "github.com/google/syzkaller/pkg/hash"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/datastore"
+)
+
+// This file contains definitions of entities stored in datastore.
+
+const (
+ maxMaintainers = 50
+ maxTextLen = 200
+ MaxStringLen = 1024
+
+ maxCrashes = 20
+)
+
+type Build struct {
+ Namespace string
+ Manager string
+ ID string // unique ID generated by syz-ci
+ SyzkallerCommit string
+ CompilerID string
+ KernelRepo string
+ KernelBranch string
+ KernelCommit string
+ KernelConfig int64 // reference to KernelConfig text entity
+}
+
+type Bug struct {
+ Namespace string
+	Seq        int64 // sequence number of this bug among bugs with the same title
+ Title string
+ Status int
+ DupOf string
+ NumCrashes int64
+ NumRepro int64
+ ReproLevel dashapi.ReproLevel
+ HasReport bool
+ FirstTime time.Time
+ LastTime time.Time
+ Closed time.Time
+ Reporting []BugReporting
+ Commits []string
+ PatchedOn []string
+}
+
+type BugReporting struct {
+ Name string // refers to Reporting.Name
+	ID         string // unique ID per Bug/BugReporting used in communication with external systems
+ Link string
+ ReproLevel dashapi.ReproLevel
+ Reported time.Time
+ Closed time.Time
+}
+
+type Crash struct {
+ Manager string
+ BuildID string
+ Time time.Time
+ Maintainers []string `datastore:",noindex"`
+ Log int64 // reference to CrashLog text entity
+ Report int64 // reference to CrashReport text entity
+ ReproOpts []byte `datastore:",noindex"`
+ ReproSyz int64 // reference to ReproSyz text entity
+ ReproC int64 // reference to ReproC text entity
+ ReportLen int
+}
+
+// ReportingState holds dynamic info associated with reporting.
+type ReportingState struct {
+ Entries []ReportingStateEntry
+}
+
+type ReportingStateEntry struct {
+ Namespace string
+ Name string
+ // Current reporting quota consumption.
+ Sent int
+ Date int
+}
+
+// Text holds text blobs (crash logs, reports, reproducers, etc).
+type Text struct {
+ Namespace string
+ Text []byte `datastore:",noindex"` // gzip-compressed text
+}
+
+const (
+ BugStatusOpen = iota
+)
+
+const (
+ BugStatusFixed = 1000 + iota
+ BugStatusInvalid
+ BugStatusDup
+)
+
+const (
+ ReproLevelNone = dashapi.ReproLevelNone
+ ReproLevelSyz = dashapi.ReproLevelSyz
+ ReproLevelC = dashapi.ReproLevelC
+)
+
+func buildKey(c context.Context, ns, id string) *datastore.Key {
+ if ns == "" {
+ panic("requesting build key outside of namespace")
+ }
+ h := hash.String([]byte(fmt.Sprintf("%v-%v", ns, id)))
+ return datastore.NewKey(c, "Build", h, 0, nil)
+}
+
+func loadBuild(c context.Context, ns, id string) (*Build, error) {
+ build := new(Build)
+ if err := datastore.Get(c, buildKey(c, ns, id), build); err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("unknown build %v/%v", ns, id)
+ }
+ return nil, fmt.Errorf("failed to get build %v/%v: %v", ns, id, err)
+ }
+ return build, nil
+}
+
+func (bug *Bug) displayTitle() string {
+ if bug.Seq == 0 {
+ return bug.Title
+ }
+ return fmt.Sprintf("%v (%v)", bug.Title, bug.Seq+1)
+}
+
+func canonicalBug(c context.Context, bug *Bug) (*Bug, error) {
+ for {
+ if bug.Status != BugStatusDup {
+ return bug, nil
+ }
+ canon := new(Bug)
+ bugKey := datastore.NewKey(c, "Bug", bug.DupOf, 0, nil)
+ if err := datastore.Get(c, bugKey, canon); err != nil {
+ return nil, fmt.Errorf("failed to get dup bug %q for %q: %v",
+ bug.DupOf, bugKeyHash(bug.Namespace, bug.Title, bug.Seq), err)
+ }
+ bug = canon
+ }
+}
+
+func bugKeyHash(ns, title string, seq int64) string {
+ return hash.String([]byte(fmt.Sprintf("%v-%v-%v-%v", config.Namespaces[ns].Key, ns, title, seq)))
+}
+
+func bugReportingHash(bugHash, reporting string) string {
+ return hash.String([]byte(fmt.Sprintf("%v-%v", bugHash, reporting)))
+}
+
+func textLink(tag string, id int64) string {
+ if id == 0 {
+ return ""
+ }
+ return fmt.Sprintf("/text?tag=%v&id=%v", tag, id)
+}
diff --git a/dashboard/app/error.html b/dashboard/app/error.html
new file mode 100644
index 000000000..aec5f52e1
--- /dev/null
+++ b/dashboard/app/error.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html>
+<head>
+ <title>Syzkaller Dashboard</title>
+ <link rel="stylesheet" href="/static/style.css"/>
+</head>
+<body>
+ {{.}}
+ <br>
+ <a href="javascript:history.back()">back</a>
+</body>
+</html>
diff --git a/dashboard/app/fix_test.go b/dashboard/app/fix_test.go
new file mode 100644
index 000000000..789e3c204
--- /dev/null
+++ b/dashboard/app/fix_test.go
@@ -0,0 +1,415 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build aetest
+
+package dash
+
+import (
+ "testing"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+)
+
+func reportAllBugs(c *Ctx, expect int) []*dashapi.BugReport {
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ if len(resp.Reports) != expect {
+ c.t.Fatalf("\n%v: want %v reports, got %v", caller(0), expect, len(resp.Reports))
+ }
+ for _, rep := range resp.Reports {
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+ }
+ return resp.Reports
+}
+
+// Basic scenario of marking a bug as fixed by a particular commit,
+// discovering this commit on builder and marking the bug as ultimately fixed.
+func TestFixBasic(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build1 := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build1, nil))
+
+ crash1 := testCrash(build1, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ builderPollReq := &dashapi.BuilderPollReq{build1.Manager}
+ builderPollResp := new(dashapi.BuilderPollResp)
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ reports := reportAllBugs(c, 1)
+ rep := reports[0]
+
+ // Specify fixing commit for the bug.
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"foo: fix the crash"},
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Check that the commit is now passed to builders.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "foo: fix the crash")
+
+ // Patches must not be reset on other actions.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Upstream commands must fail if patches are already present.
+ // Right course of action is unclear in this situation,
+ // so this test merely documents the current behavior.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusUpstream,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, false)
+
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reportAllBugs(c, 0)
+
+ // Upload another build with the commit present.
+ build2 := testBuild(2)
+ build2.Manager = build1.Manager
+ build2.Commits = []string{"foo: fix the crash"}
+ c.expectOK(c.API(client1, key1, "upload_build", build2, nil))
+
+ // Check that the commit is now not passed to this builder.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ // Ensure that a new crash creates a new bug (the old one must be marked as fixed).
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reports = reportAllBugs(c, 1)
+ c.expectEQ(reports[0].Title, "title1 (2)")
+}
+
+// Test bug that is fixed by 2 commits.
+func TestFixedByTwoCommits(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build1 := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build1, nil))
+
+ crash1 := testCrash(build1, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ builderPollReq := &dashapi.BuilderPollReq{build1.Manager}
+ builderPollResp := new(dashapi.BuilderPollResp)
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ reports := reportAllBugs(c, 1)
+ rep := reports[0]
+
+ // Specify fixing commit for the bug.
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"bar: prepare for fixing", "foo: fix the crash"},
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Check that the commit is now passed to builders.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 2)
+ c.expectEQ(builderPollResp.PendingCommits[0], "bar: prepare for fixing")
+ c.expectEQ(builderPollResp.PendingCommits[1], "foo: fix the crash")
+
+ // Upload another build with only one of the commits.
+ build2 := testBuild(2)
+ build2.Manager = build1.Manager
+ build2.Commits = []string{"bar: prepare for fixing"}
+ c.expectOK(c.API(client1, key1, "upload_build", build2, nil))
+
+ // Check that it has not fixed the bug.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 2)
+ c.expectEQ(builderPollResp.PendingCommits[0], "bar: prepare for fixing")
+ c.expectEQ(builderPollResp.PendingCommits[1], "foo: fix the crash")
+
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reportAllBugs(c, 0)
+
+ // Now upload build with both commits.
+ build3 := testBuild(3)
+ build3.Manager = build1.Manager
+ build3.Commits = []string{"foo: fix the crash", "bar: prepare for fixing"}
+ c.expectOK(c.API(client1, key1, "upload_build", build3, nil))
+
+ // Check that the commit is now not passed to this builder.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ // Ensure that a new crash creates a new bug (the old one must be marked as fixed).
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reports = reportAllBugs(c, 1)
+ c.expectEQ(reports[0].Title, "title1 (2)")
+}
+
+// A bug is marked as fixed by one commit and then remarked as fixed by another.
+func TestReFixed(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build1 := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build1, nil))
+
+ crash1 := testCrash(build1, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ builderPollReq := &dashapi.BuilderPollReq{build1.Manager}
+ builderPollResp := new(dashapi.BuilderPollResp)
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ reports := reportAllBugs(c, 1)
+ rep := reports[0]
+
+ // Specify fixing commit for the bug.
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"a wrong one"},
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"the right one"},
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "the right one")
+
+ // Upload another build with the wrong commit.
+ build2 := testBuild(2)
+ build2.Manager = build1.Manager
+ build2.Commits = []string{"a wrong one"}
+ c.expectOK(c.API(client1, key1, "upload_build", build2, nil))
+
+ // Check that it has not fixed the bug.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "the right one")
+
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reportAllBugs(c, 0)
+
+ // Now upload build with the right commit.
+ build3 := testBuild(3)
+ build3.Manager = build1.Manager
+ build3.Commits = []string{"the right one"}
+ c.expectOK(c.API(client1, key1, "upload_build", build3, nil))
+
+ // Check that the commit is now not passed to this builder.
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+}
+
+// Fixing commit is present on one manager, but missing on another.
+func TestFixTwoManagers(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build1 := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build1, nil))
+
+ crash1 := testCrash(build1, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ builderPollReq := &dashapi.BuilderPollReq{build1.Manager}
+ builderPollResp := new(dashapi.BuilderPollResp)
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ reports := reportAllBugs(c, 1)
+ rep := reports[0]
+
+ // Specify fixing commit for the bug.
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"foo: fix the crash"},
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Now the second manager appears.
+ build2 := testBuild(2)
+ c.expectOK(c.API(client1, key1, "upload_build", build2, nil))
+
+ // Check that the commit is now passed to builders.
+ builderPollReq = &dashapi.BuilderPollReq{build1.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "foo: fix the crash")
+
+ builderPollReq = &dashapi.BuilderPollReq{build2.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "foo: fix the crash")
+
+ // Now first manager picks up the commit.
+ build3 := testBuild(3)
+ build3.Manager = build1.Manager
+ build3.Commits = []string{"foo: fix the crash"}
+ c.expectOK(c.API(client1, key1, "upload_build", build3, nil))
+
+ // Check that the commit is now not passed to this builder.
+ builderPollReq = &dashapi.BuilderPollReq{build1.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ // But still passed to another.
+ builderPollReq = &dashapi.BuilderPollReq{build2.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "foo: fix the crash")
+
+ // Check that the bug is still open.
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reportAllBugs(c, 0)
+
+ // Now the second manager picks up the commit.
+ build4 := testBuild(4)
+ build4.Manager = build2.Manager
+ build4.Commits = []string{"foo: fix the crash"}
+ c.expectOK(c.API(client1, key1, "upload_build", build4, nil))
+
+ // Now the bug must be fixed.
+ builderPollReq = &dashapi.BuilderPollReq{build2.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reports = reportAllBugs(c, 1)
+ c.expectEQ(reports[0].Title, "title1 (2)")
+}
+
+func TestReFixedTwoManagers(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build1 := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build1, nil))
+
+ crash1 := testCrash(build1, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ builderPollReq := &dashapi.BuilderPollReq{build1.Manager}
+ builderPollResp := new(dashapi.BuilderPollResp)
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ reports := reportAllBugs(c, 1)
+ rep := reports[0]
+
+ // Specify fixing commit for the bug.
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"foo: fix the crash"},
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Now the second manager appears.
+ build2 := testBuild(2)
+ c.expectOK(c.API(client1, key1, "upload_build", build2, nil))
+
+ // Now first manager picks up the commit.
+ build3 := testBuild(3)
+ build3.Manager = build1.Manager
+ build3.Commits = []string{"foo: fix the crash"}
+ c.expectOK(c.API(client1, key1, "upload_build", build3, nil))
+
+ builderPollReq = &dashapi.BuilderPollReq{build1.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ // Now we change the fixing commit.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"the right one"},
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Now it must again appear on both managers.
+ builderPollReq = &dashapi.BuilderPollReq{build1.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "the right one")
+
+ builderPollReq = &dashapi.BuilderPollReq{build2.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 1)
+ c.expectEQ(builderPollResp.PendingCommits[0], "the right one")
+
+ // Now the second manager picks up the second commit.
+ build4 := testBuild(4)
+ build4.Manager = build2.Manager
+ build4.Commits = []string{"the right one"}
+ c.expectOK(c.API(client1, key1, "upload_build", build4, nil))
+
+ // The bug must be still open.
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reportAllBugs(c, 0)
+
+ // Specify fixing commit again, but it's the same one as before, so nothing changed.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ FixCommits: []string{"the right one"},
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Now the first manager picks up the second commit.
+ build5 := testBuild(5)
+ build5.Manager = build1.Manager
+ build5.Commits = []string{"the right one"}
+ c.expectOK(c.API(client1, key1, "upload_build", build5, nil))
+
+ // Now the bug must be fixed.
+ builderPollReq = &dashapi.BuilderPollReq{build1.Manager}
+ c.expectOK(c.API(client1, key1, "builder_poll", builderPollReq, builderPollResp))
+ c.expectEQ(len(builderPollResp.PendingCommits), 0)
+
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+ reports = reportAllBugs(c, 1)
+ c.expectEQ(reports[0].Title, "title1 (2)")
+}
diff --git a/dashboard/app/handler.go b/dashboard/app/handler.go
new file mode 100644
index 000000000..1a9ec925e
--- /dev/null
+++ b/dashboard/app/handler.go
@@ -0,0 +1,86 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "fmt"
+ "html/template"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+// This file contains common middleware for UI handlers (auth, html templates, etc).
+
+type contextHandler func(c context.Context, w http.ResponseWriter, r *http.Request) error
+
+func handlerWrapper(fn contextHandler) http.Handler {
+ return handleContext(handleAuth(fn))
+}
+
+func handleContext(fn contextHandler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ if err := fn(c, w, r); err != nil {
+ log.Errorf(c, "%v", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ if err1 := templates.ExecuteTemplate(w, "error.html", err.Error()); err1 != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ }
+ })
+}
+
+func handleAuth(fn contextHandler) contextHandler {
+ return func(c context.Context, w http.ResponseWriter, r *http.Request) error {
+ u := user.Current(c)
+ if u == nil {
+ return fmt.Errorf("sign-in required")
+ }
+ if !u.Admin && (u.AuthDomain != "gmail.com" ||
+ !strings.HasSuffix(u.Email, config.AuthDomain)) {
+ log.Errorf(c, "unauthorized user: domain='%v' email='%v'", u.AuthDomain, u.Email)
+ return fmt.Errorf("%v is not authorized to view this", u.Email)
+ }
+ return fn(c, w, r)
+ }
+}
+
+type uiHeader struct {
+}
+
+func commonHeader(c context.Context) (*uiHeader, error) {
+ h := &uiHeader{}
+ return h, nil
+}
+
+func formatTime(t time.Time) string {
+ return t.Format("Jan 02 15:04")
+}
+
+func formatReproLevel(l dashapi.ReproLevel) string {
+ switch l {
+ case ReproLevelSyz:
+ return "syz"
+ case ReproLevelC:
+ return "C"
+ default:
+ return ""
+ }
+}
+
+var (
+ templates = template.Must(template.New("").Funcs(templateFuncs).ParseGlob("*.html"))
+
+ templateFuncs = template.FuncMap{
+ "formatTime": formatTime,
+ "formatReproLevel": formatReproLevel,
+ }
+)
diff --git a/dashboard/app/index.yaml b/dashboard/app/index.yaml
new file mode 100644
index 000000000..a981c90d6
--- /dev/null
+++ b/dashboard/app/index.yaml
@@ -0,0 +1,41 @@
+indexes:
+
+# AUTOGENERATED
+
+# This index.yaml is automatically updated whenever the dev_appserver
+# detects that a new type of query is run. If you want to manage the
+# index.yaml file manually, remove the above marker line (the line
+# saying "# AUTOGENERATED"). If you want to manage some indexes
+# manually, move them above the marker line. The index.yaml file is
+# automatically uploaded to the admin console when you next deploy
+# your application using appcfg.py.
+
+- kind: Bug
+ properties:
+ - name: Namespace
+ - name: Status
+
+- kind: Build
+ properties:
+ - name: Namespace
+ - name: Manager
+
+- kind: Crash
+ ancestor: yes
+ properties:
+ - name: ReproC
+ - name: ReproSyz
+ - name: Report
+ - name: Time
+
+- kind: Crash
+ ancestor: yes
+ properties:
+ - name: ReproC
+ direction: desc
+ - name: ReproSyz
+ direction: desc
+ - name: ReportLen
+ direction: desc
+ - name: Time
+ direction: desc
diff --git a/dashboard/app/mail_bug.txt b/dashboard/app/mail_bug.txt
new file mode 100644
index 000000000..5357fca19
--- /dev/null
+++ b/dashboard/app/mail_bug.txt
@@ -0,0 +1,10 @@
+Hello,
+
+syzkaller hit the following crash on {{.KernelCommit}}
+{{.KernelRepo}}/{{.KernelBranch}}
+compiler: {{.CompilerID}}
+{{if .ReproC}}C reproducer is attached{{end}}{{if .ReproSyz}}syzkaller reproducer is attached{{end}}
+.config is attached
+
+
+{{.Report}}
diff --git a/dashboard/app/main.go b/dashboard/app/main.go
new file mode 100644
index 000000000..b56706f03
--- /dev/null
+++ b/dashboard/app/main.go
@@ -0,0 +1,272 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+// This file contains web UI http handlers.
+
+func init() {
+ http.Handle("/", handlerWrapper(handleMain))
+ http.Handle("/bug", handlerWrapper(handleBug))
+ http.Handle("/text", handlerWrapper(handleText))
+}
+
+type uiMain struct {
+ Header *uiHeader
+ Log []byte
+ BugGroups []*uiBugGroup
+}
+
+type uiBugPage struct {
+ Header *uiHeader
+ Bug *uiBug
+ Crashes []*uiCrash
+}
+
+type uiBugGroup struct {
+ Namespace string
+ Bugs []*uiBug
+}
+
+type uiBug struct {
+ Namespace string
+ ID string
+ Title string
+ NumCrashes int64
+ FirstTime time.Time
+ LastTime time.Time
+ ReproLevel dashapi.ReproLevel
+ ReportingIndex int
+ Status string
+ Link string
+ Commits string
+}
+
+type uiCrash struct {
+ Manager string
+ Time time.Time
+ Maintainers string
+ LogLink string
+ ReportLink string
+ ReproSyzLink string
+ ReproCLink string
+}
+
+// handleMain serves main page.
+func handleMain(c context.Context, w http.ResponseWriter, r *http.Request) error {
+ h, err := commonHeader(c)
+ if err != nil {
+ return err
+ }
+ errorLog, err := fetchErrorLogs(c)
+ if err != nil {
+ return err
+ }
+ groups, err := fetchBugs(c)
+ if err != nil {
+ return err
+ }
+ data := &uiMain{
+ Header: h,
+ Log: errorLog,
+ BugGroups: groups,
+ }
+ return templates.ExecuteTemplate(w, "main.html", data)
+}
+
+// handleBug serves page about a single bug (which is passed in id argument).
+func handleBug(c context.Context, w http.ResponseWriter, r *http.Request) error {
+ bug := new(Bug)
+ bugKey := datastore.NewKey(c, "Bug", r.FormValue("id"), 0, nil)
+ if err := datastore.Get(c, bugKey, bug); err != nil {
+ return err
+ }
+ h, err := commonHeader(c)
+ if err != nil {
+ return err
+ }
+ state, err := loadReportingState(c)
+ if err != nil {
+ return err
+ }
+ uiBug := createUIBug(c, bug, state)
+ crashes, err := loadCrashesForBug(c, bug)
+ if err != nil {
+ return err
+ }
+ data := &uiBugPage{
+ Header: h,
+ Bug: uiBug,
+ Crashes: crashes,
+ }
+ return templates.ExecuteTemplate(w, "bug.html", data)
+}
+
+// handleText serves plain text blobs (crash logs, reports, reproducers, etc).
+func handleText(c context.Context, w http.ResponseWriter, r *http.Request) error {
+ id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse text id: %v", err)
+ }
+ data, err := getText(c, r.FormValue("tag"), id)
+ if err != nil {
+ return err
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Write(data)
+ return nil
+}
+
+func fetchBugs(c context.Context) ([]*uiBugGroup, error) {
+ var bugs []*Bug
+ _, err := datastore.NewQuery("Bug").
+ Filter("Status=", BugStatusOpen).
+ GetAll(c, &bugs)
+ if err != nil {
+ return nil, err
+ }
+ state, err := loadReportingState(c)
+ if err != nil {
+ return nil, err
+ }
+ groups := make(map[string][]*uiBug)
+ for _, bug := range bugs {
+ uiBug := createUIBug(c, bug, state)
+ groups[bug.Namespace] = append(groups[bug.Namespace], uiBug)
+ }
+ var res []*uiBugGroup
+ for ns, bugs := range groups {
+ sort.Sort(uiBugSorter(bugs))
+ res = append(res, &uiBugGroup{
+ Namespace: ns,
+ Bugs: bugs,
+ })
+ }
+ sort.Sort(uiBugGroupSorter(res))
+ return res, nil
+}
+
+func createUIBug(c context.Context, bug *Bug, state *ReportingState) *uiBug {
+ _, _, reportingIdx, status, link, err := needReport(c, "", state, bug)
+ if err != nil {
+ status = err.Error()
+ }
+ if status == "" {
+ status = "???"
+ }
+ uiBug := &uiBug{
+ Namespace: bug.Namespace,
+ ID: bugKeyHash(bug.Namespace, bug.Title, bug.Seq),
+ Title: bug.displayTitle(),
+ NumCrashes: bug.NumCrashes,
+ FirstTime: bug.FirstTime,
+ LastTime: bug.LastTime,
+ ReproLevel: bug.ReproLevel,
+ ReportingIndex: reportingIdx,
+ Status: status,
+ Link: link,
+ Commits: fmt.Sprintf("%q", bug.Commits),
+ }
+ return uiBug
+}
+
+func loadCrashesForBug(c context.Context, bug *Bug) ([]*uiCrash, error) {
+ bugHash := bugKeyHash(bug.Namespace, bug.Title, bug.Seq)
+ bugKey := datastore.NewKey(c, "Bug", bugHash, 0, nil)
+ crashes, err := queryCrashesForBug(c, bugKey, 100)
+ if err != nil {
+ return nil, err
+ }
+ var results []*uiCrash
+ for _, crash := range crashes {
+ ui := &uiCrash{
+ Manager: crash.Manager,
+ Time: crash.Time,
+ Maintainers: fmt.Sprintf("%q", crash.Maintainers),
+ LogLink: textLink("CrashLog", crash.Log),
+ ReportLink: textLink("CrashReport", crash.Report),
+ ReproSyzLink: textLink("ReproSyz", crash.ReproSyz),
+ ReproCLink: textLink("ReproC", crash.ReproC),
+ }
+ results = append(results, ui)
+ }
+ return results, nil
+}
+
+func fetchErrorLogs(c context.Context) ([]byte, error) {
+ const (
+ minLogLevel = 2
+ maxLines = 100
+ reportPeriod = 7 * 24 * time.Hour
+ )
+ q := &log.Query{
+ StartTime: time.Now().Add(-reportPeriod),
+ AppLogs: true,
+ ApplyMinLevel: true,
+ MinLevel: minLogLevel,
+ }
+ result := q.Run(c)
+ var lines []string
+ for i := 0; i < maxLines; i++ {
+ rec, err := result.Next()
+ if rec == nil {
+ break
+ }
+ if err != nil {
+ entry := fmt.Sprintf("ERROR FETCHING LOGS: %v\n", err)
+ lines = append(lines, entry)
+ break
+ }
+ for _, al := range rec.AppLogs {
+ if al.Level < minLogLevel {
+ continue
+ }
+ text := strings.Replace(al.Message, "\n", " ", -1)
+ entry := fmt.Sprintf("%v: %v (%v)\n", formatTime(al.Time), text, rec.Resource)
+ lines = append(lines, entry)
+ }
+ }
+ buf := new(bytes.Buffer)
+ for i := len(lines) - 1; i >= 0; i-- {
+ buf.WriteString(lines[i])
+ }
+ return buf.Bytes(), nil
+}
+
+type uiBugSorter []*uiBug
+
+func (a uiBugSorter) Len() int { return len(a) }
+func (a uiBugSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a uiBugSorter) Less(i, j int) bool {
+ if a[i].ReportingIndex != a[j].ReportingIndex {
+ return a[i].ReportingIndex > a[j].ReportingIndex
+ }
+ if (a[i].Link != "") != (a[j].Link != "") {
+ return a[i].Link != ""
+ }
+ if a[i].ReproLevel != a[j].ReproLevel {
+ return a[i].ReproLevel > a[j].ReproLevel
+ }
+ return a[i].FirstTime.After(a[j].FirstTime)
+}
+
+type uiBugGroupSorter []*uiBugGroup
+
+func (a uiBugGroupSorter) Len() int { return len(a) }
+func (a uiBugGroupSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a uiBugGroupSorter) Less(i, j int) bool { return a[i].Namespace < a[j].Namespace }
diff --git a/dashboard/app/main.html b/dashboard/app/main.html
new file mode 100644
index 000000000..4ea56c763
--- /dev/null
+++ b/dashboard/app/main.html
@@ -0,0 +1,44 @@
+{{define "bug_table"}}
+ <table class="list_table">
+ <caption>{{.Namespace}} ({{len $.Bugs}}):</caption>
+ <tr>
+ <th>Title</th>
+ <th>Count</th>
+ <th>Repro</th>
+ <th>Last</th>
+ <th>Status</th>
+ </tr>
+ {{range $b := $.Bugs}}
+ <tr>
+ <td class="title"><a href="/bug?id={{$b.ID}}">{{$b.Title}}</a></td>
+ <td class="count">{{$b.NumCrashes}}</td>
+ <td class="repro">{{formatReproLevel $b.ReproLevel}}</td>
+ <td class="time">{{formatTime $b.LastTime}}</td>
+ <td class="status">{{if $b.Link}}<a href="{{$b.Link}}">{{$b.Status}}</a>{{else}}{{$b.Status}}{{end}}</td>
+ </tr>
+ {{end}}
+ </table>
+{{end}}
+
+<!doctype html>
+<html>
+<head>
+ <title>Syzkaller Dashboard</title>
+ <link rel="stylesheet" href="/static/style.css"/>
+</head>
+<body>
+ {{template "header" .Header}}
+
+ <b>Error log:</b><br>
+ <textarea id="log_textarea" readonly rows="20" wrap=off>{{printf "%s" .Log}}</textarea>
+ <script>
+ var textarea = document.getElementById("log_textarea");
+ textarea.scrollTop = textarea.scrollHeight;
+ </script>
+ <br><br>
+
+ {{range $g := $.BugGroups}}
+ {{template "bug_table" $g}} <br>
+ {{end}}
+</body>
+</html>
diff --git a/dashboard/app/reporting.go b/dashboard/app/reporting.go
new file mode 100644
index 000000000..78584642a
--- /dev/null
+++ b/dashboard/app/reporting.go
@@ -0,0 +1,529 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+// Backend-independent reporting logic.
+// Two main entry points:
+// - reportingPoll is called by backends to get list of bugs that need to be reported.
+// - incomingCommand is called by backends to update bug statuses.
+
+const (
+ maxMailLogLen = 1 << 20
+ maxMailReportLen = 64 << 10
+ internalError = "internal error"
+)
+
+// reportingPoll is called by backends to get list of bugs that need to be reported.
+// typ selects the reporting backend type (e.g. "email"); an empty typ matches any type.
+// All errors are logged and swallowed: a failing poll returns an empty/partial list.
+func reportingPoll(c context.Context, typ string) []*dashapi.BugReport {
+ state, err := loadReportingState(c)
+ if err != nil {
+ log.Errorf(c, "%v", err)
+ return nil
+ }
+ var bugs []*Bug
+ // Status < BugStatusFixed selects bugs that are not yet closed
+ // (presumably open/dup statuses order below fixed/invalid -- TODO confirm ordering).
+ _, err = datastore.NewQuery("Bug").
+ Filter("Status<", BugStatusFixed).
+ GetAll(c, &bugs)
+ if err != nil {
+ log.Errorf(c, "%v", err)
+ return nil
+ }
+ log.Infof(c, "fetched %v bugs", len(bugs))
+ // Report higher-priority bugs first (with repro/report/more crashes), see bugReportSorter.
+ sort.Sort(bugReportSorter(bugs))
+ var reports []*dashapi.BugReport
+ for _, bug := range bugs {
+ rep, err := handleReportBug(c, typ, state, bug)
+ if err != nil {
+ // One broken bug must not block reporting of the others.
+ log.Errorf(c, "%v: failed to report bug %v: %v", bug.Namespace, bug.Title, err)
+ continue
+ }
+ if rep == nil {
+ continue
+ }
+ reports = append(reports, rep)
+ }
+ return reports
+}
+
+// handleReportBug decides whether bug needs to be reported via the typ backend
+// now and, if so, builds the full report for it. Returns (nil, nil) when the
+// bug should not be reported at this time.
+func handleReportBug(c context.Context, typ string, state *ReportingState, bug *Bug) (*dashapi.BugReport, error) {
+ reporting, bugReporting, _, _, _, err := needReport(c, typ, state, bug)
+ if err != nil || reporting == nil {
+ return nil, err
+ }
+ rep, err := createBugReport(c, bug, bugReporting.ID, reporting.Config)
+ if err != nil {
+ return nil, err
+ }
+ log.Infof(c, "bug %q: reporting to %v", bug.Title, reporting.Name)
+ return rep, nil
+}
+
+// needReport decides whether bug should be reported via the typ backend now.
+// On a negative decision reporting/bugReporting are returned as nil and status
+// carries a human-readable explanation (also shown in the UI); link is the
+// external link of the current reporting, if any.
+func needReport(c context.Context, typ string, state *ReportingState, bug *Bug) (reporting *Reporting, bugReporting *BugReporting, reportingIdx int, status, link string, err error) {
+ reporting, bugReporting, reportingIdx, status, err = currentReporting(c, bug)
+ if err != nil || reporting == nil {
+ return
+ }
+ if typ != "" && typ != reporting.Config.Type() {
+ status = "on a different reporting"
+ reporting = nil
+ bugReporting = nil
+ return
+ }
+ link = bugReporting.Link
+ // Already reported and we have nothing new (no better repro) -- skip.
+ if !bugReporting.Reported.IsZero() && bugReporting.ReproLevel >= bug.ReproLevel {
+ status = fmt.Sprintf("%v: reported%v on %v",
+ reporting.Name, reproStr(bugReporting.ReproLevel),
+ formatTime(bugReporting.Reported))
+ reporting = nil
+ bugReporting = nil
+ return
+ }
+ ent := state.getEntry(timeNow(c), bug.Namespace, reporting.Name)
+ // Limit number of reports sent per day,
+ // but don't limit sending repros to already reported bugs.
+ if bugReporting.Reported.IsZero() && reporting.DailyLimit != 0 &&
+ ent.Sent >= reporting.DailyLimit {
+ status = fmt.Sprintf("%v: out of quota for today", reporting.Name)
+ reporting = nil
+ bugReporting = nil
+ return
+ }
+ if bugReporting.Reported.IsZero() {
+ // This update won't be committed, but it will prevent us from
+ // reporting too many bugs in a single poll.
+ ent.Sent++
+ }
+ cfg := config.Namespaces[bug.Namespace]
+ // Give the bug some time to acquire a C repro before the first report.
+ if bug.ReproLevel < ReproLevelC && timeSince(c, bug.FirstTime) < cfg.WaitForRepro {
+ status = fmt.Sprintf("%v: waiting for C repro", reporting.Name)
+ reporting = nil
+ bugReporting = nil
+ return
+ }
+ if !cfg.MailWithoutReport && !bug.HasReport {
+ status = fmt.Sprintf("%v: no report", reporting.Name)
+ reporting = nil
+ bugReporting = nil
+ return
+ }
+
+ // Ready to be reported.
+ status = fmt.Sprintf("%v: ready to report", reporting.Name)
+ if !bugReporting.Reported.IsZero() {
+ status += fmt.Sprintf(" (reported%v on %v)",
+ reproStr(bugReporting.ReproLevel), formatTime(bugReporting.Reported))
+ }
+ return
+}
+
+// currentReporting returns the reporting stage the bug is currently in: the
+// first entry of bug.Reporting that is not closed and not disabled in config,
+// together with its index. For a suspended stage it returns a human-readable
+// status string instead of a reporting.
+func currentReporting(c context.Context, bug *Bug) (*Reporting, *BugReporting, int, string, error) {
+ for i := range bug.Reporting {
+ bugReporting := &bug.Reporting[i]
+ if !bugReporting.Closed.IsZero() {
+ continue
+ }
+ reporting := config.Namespaces[bug.Namespace].ReportingByName(bugReporting.Name)
+ if reporting == nil {
+ return nil, nil, 0, "", fmt.Errorf("%v: missing in config", bugReporting.Name)
+ }
+ if reporting.Status == ReportingDisabled {
+ continue
+ }
+ if reporting.Status == ReportingSuspended {
+ // Was fmt.Sprintf("%v: reporting suspended") -- the %v verb had no
+ // argument, so the status rendered as "%!v(MISSING): reporting suspended".
+ return nil, nil, 0, fmt.Sprintf("%v: reporting suspended", bugReporting.Name), nil
+ }
+ return reporting, bugReporting, i, "", nil
+ }
+ return nil, nil, 0, "", fmt.Errorf("no reporting left")
+}
+
+// reproStr returns a short suffix describing the repro level.
+// The strings intentionally carry a leading space so they can be spliced
+// directly into messages like "reported%v on %v"; none yields "".
+func reproStr(level dashapi.ReproLevel) string {
+ switch level {
+ case ReproLevelSyz:
+ return " syz repro"
+ case ReproLevelC:
+ return " C repro"
+ default:
+ return ""
+ }
+}
+
+// createBugReport assembles a dashapi.BugReport for the bug's best crash
+// (as chosen by findCrashForBug). id is the per-reporting bug ID exposed to
+// the backend; config is the reporting-specific config, marshaled to JSON
+// and passed through opaquely in the report.
+func createBugReport(c context.Context, bug *Bug, id string, config interface{}) (*dashapi.BugReport, error) {
+ reportingConfig, err := json.Marshal(config)
+ if err != nil {
+ return nil, err
+ }
+ bugKey := datastore.NewKey(c, "Bug", bugKeyHash(bug.Namespace, bug.Title, bug.Seq), 0, nil)
+ crash, err := findCrashForBug(c, bug, bugKey)
+ if err != nil {
+ return nil, err
+ }
+ crashLog, err := getText(c, "CrashLog", crash.Log)
+ if err != nil {
+ return nil, err
+ }
+ // Keep the tail of the log: the crash output is at the end.
+ if len(crashLog) > maxMailLogLen {
+ crashLog = crashLog[len(crashLog)-maxMailLogLen:]
+ }
+ report, err := getText(c, "CrashReport", crash.Report)
+ if err != nil {
+ return nil, err
+ }
+ // The report, in contrast, is truncated at the end (the head is the useful part).
+ if len(report) > maxMailReportLen {
+ report = report[:maxMailReportLen]
+ }
+ reproC, err := getText(c, "ReproC", crash.ReproC)
+ if err != nil {
+ return nil, err
+ }
+ reproSyz, err := getText(c, "ReproSyz", crash.ReproSyz)
+ if err != nil {
+ return nil, err
+ }
+ // Prepend repro options as a "#..." comment line to the syz repro program.
+ if len(reproSyz) != 0 && len(crash.ReproOpts) != 0 {
+ tmp := append([]byte{'#'}, crash.ReproOpts...)
+ tmp = append(tmp, '\n')
+ tmp = append(tmp, reproSyz...)
+ reproSyz = tmp
+ }
+ build, err := loadBuild(c, bug.Namespace, crash.BuildID)
+ if err != nil {
+ return nil, err
+ }
+ kernelConfig, err := getText(c, "KernelConfig", build.KernelConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ rep := &dashapi.BugReport{
+ Config: reportingConfig,
+ ID: id,
+ Title: bug.displayTitle(),
+ Log: crashLog,
+ Report: report,
+ Maintainers: crash.Maintainers,
+ CompilerID: build.CompilerID,
+ KernelRepo: build.KernelRepo,
+ KernelBranch: build.KernelBranch,
+ KernelCommit: build.KernelCommit,
+ KernelConfig: kernelConfig,
+ ReproC: reproC,
+ ReproSyz: reproSyz,
+ }
+ return rep, nil
+}
+
+// incomingCommand is entry point to bug status updates.
+// It returns a human-readable reply for the submitter and an ok flag;
+// on failure the underlying error is logged here, not returned.
+func incomingCommand(c context.Context, cmd *dashapi.BugUpdate) (string, bool) {
+ log.Infof(c, "got command: %+v", cmd)
+ reply, err := incomingCommandImpl(c, cmd)
+ if err != nil {
+ log.Errorf(c, "%v", err)
+ return reply, false
+ }
+ return reply, true
+}
+
+// incomingCommandImpl validates the update (including dup target checks) and
+// then applies it inside a datastore transaction. The first return value is a
+// human-readable reply; the error indicates failure.
+// NOTE(review): the dup-target validation below runs outside the transaction,
+// so it is a best-effort pre-check; verify this is acceptable for concurrency.
+func incomingCommandImpl(c context.Context, cmd *dashapi.BugUpdate) (string, error) {
+ bug, bugKey, err := findBugByReportingID(c, cmd.ID)
+ if err != nil {
+ return "can't find the corresponding bug", err
+ }
+ now := timeNow(c)
+ dupHash := ""
+ if cmd.Status == dashapi.BugStatusDup {
+ dup, dupKey, err := findBugByReportingID(c, cmd.DupOf)
+ if err != nil {
+ return "can't find the dup bug", err
+ }
+ if bugKey.StringID() == dupKey.StringID() {
+ return "can't dup bug to itself", fmt.Errorf("can't dup bug to itself")
+ }
+ if bug.Namespace != dup.Namespace {
+ return "can't find the dup bug",
+ fmt.Errorf("inter-namespace dup: %v->%v", bug.Namespace, dup.Namespace)
+ }
+ bugReporting, _ := bugReportingByID(bug, cmd.ID, now)
+ dupReporting, _ := bugReportingByID(dup, cmd.DupOf, now)
+ if bugReporting == nil || dupReporting == nil {
+ return internalError, fmt.Errorf("can't find bug reporting")
+ }
+ if !dupReporting.Closed.IsZero() {
+ return "dup bug is already closed", fmt.Errorf("dup bug is already closed")
+ }
+ // Dups are only allowed within the same reporting stage.
+ if bugReporting.Name != dupReporting.Name {
+ return "can't find the dup bug",
+ fmt.Errorf("inter-reporting dup: %v -> %v",
+ bugReporting.Name, dupReporting.Name)
+ }
+ dupHash = bugKeyHash(dup.Namespace, dup.Title, dup.Seq)
+ }
+
+ reply := ""
+ // The closure smuggles the reply out of the (possibly retried) transaction.
+ tx := func(c context.Context) error {
+ var err error
+ reply, err = incomingCommandTx(c, now, cmd, bugKey, dupHash)
+ return err
+ }
+ err = datastore.RunInTransaction(c, tx, &datastore.TransactionOptions{XG: true})
+ if err != nil && reply == "" {
+ reply = internalError
+ }
+ return reply, err
+}
+
+// incomingCommandTx applies a validated bug update inside a transaction:
+// re-reads the bug, applies the status transition, syncs fix commits,
+// link and repro level, and persists the bug and the reporting state.
+// Returns a human-readable reply (empty on success) and an error.
+func incomingCommandTx(c context.Context, now time.Time, cmd *dashapi.BugUpdate, bugKey *datastore.Key, dupHash string) (string, error) {
+ bug := new(Bug)
+ if err := datastore.Get(c, bugKey, bug); err != nil {
+ return "can't find the corresponding bug", err
+ }
+ // Only open/dup bugs accept updates; fixed/invalid are terminal states.
+ switch bug.Status {
+ case BugStatusOpen, BugStatusDup:
+ case BugStatusFixed, BugStatusInvalid:
+ return "this bug is already closed",
+ fmt.Errorf("got a command for a closed bug")
+ default:
+ return internalError,
+ fmt.Errorf("unknown bug status %v", bug.Status)
+ }
+ // Note: bugReportingByID also closes all reportings preceding the matched one.
+ bugReporting, final := bugReportingByID(bug, cmd.ID, now)
+ if bugReporting == nil {
+ return internalError, fmt.Errorf("can't find bug reporting")
+ }
+ if !bugReporting.Closed.IsZero() {
+ return "this bug is already closed", fmt.Errorf("got a command for a closed reporting")
+ }
+
+ state, err := loadReportingState(c)
+ if err != nil {
+ return internalError, err
+ }
+ stateEnt := state.getEntry(now, bug.Namespace, bugReporting.Name)
+ switch cmd.Status {
+ case dashapi.BugStatusOpen:
+ bug.Status = BugStatusOpen
+ bug.Closed = time.Time{}
+ if bugReporting.Reported.IsZero() {
+ bugReporting.Reported = now
+ stateEnt.Sent++ // sending repro does not count against the quota
+ }
+ if bug.ReproLevel < cmd.ReproLevel {
+ return internalError, fmt.Errorf("bug update with invalid repro level: %v/%v",
+ bug.ReproLevel, cmd.ReproLevel)
+ }
+ case dashapi.BugStatusUpstream:
+ if final {
+ reply := "can't close, this is final destination"
+ return reply, errors.New(reply)
+ }
+ if len(bug.Commits) != 0 {
+ // We could handle this case, but how/when it will occur
+ // in real life is unclear now.
+ reply := "can't upstream, the bug has fixing commits"
+ return reply, errors.New(reply)
+ }
+ // Upstreaming closes this reporting; the bug itself stays open and
+ // will surface in the next reporting stage.
+ bug.Status = BugStatusOpen
+ bug.Closed = now
+ bugReporting.Closed = now
+ case dashapi.BugStatusInvalid:
+ bugReporting.Closed = now
+ bug.Closed = now
+ bug.Status = BugStatusInvalid
+ case dashapi.BugStatusDup:
+ bug.Status = BugStatusDup
+ bug.Closed = now
+ bug.DupOf = dupHash
+ default:
+ return "unknown bug status", fmt.Errorf("unknown bug status %v", cmd.Status)
+ }
+ // Sync the set of fixing commits; any change resets PatchedOn tracking.
+ if len(cmd.FixCommits) != 0 && (bug.Status == BugStatusOpen || bug.Status == BugStatusDup) {
+ m := make(map[string]bool)
+ for _, com := range cmd.FixCommits {
+ m[com] = true
+ }
+ same := false
+ if len(bug.Commits) == len(m) {
+ same = true
+ for _, com := range bug.Commits {
+ if !m[com] {
+ same = false
+ break
+ }
+ }
+ }
+ if !same {
+ commits := make([]string, 0, len(m))
+ for com := range m {
+ if len(com) < 3 {
+ err := fmt.Errorf("bad commit title: %q", com)
+ return err.Error(), err
+ }
+ commits = append(commits, com)
+ }
+ sort.Strings(commits)
+ bug.Commits = commits
+ bug.PatchedOn = nil
+ }
+ }
+ if cmd.Link != "" {
+ bugReporting.Link = cmd.Link
+ }
+ if bugReporting.ReproLevel < cmd.ReproLevel {
+ bugReporting.ReproLevel = cmd.ReproLevel
+ }
+ // Undup: drop a stale DupOf reference if the bug is no longer a dup.
+ if bug.Status != BugStatusDup {
+ bug.DupOf = ""
+ }
+ if _, err := datastore.Put(c, bugKey, bug); err != nil {
+ return internalError, fmt.Errorf("failed to put bug: %v", err)
+ }
+ if err := saveReportingState(c, state); err != nil {
+ return internalError, err
+ }
+ return "", nil
+}
+
+// findBugByReportingID looks up the unique bug that owns the per-reporting ID.
+// Limit(2) lets us distinguish "not found" from the (unexpected) ambiguous case.
+func findBugByReportingID(c context.Context, id string) (*Bug, *datastore.Key, error) {
+ var bugs []*Bug
+ keys, err := datastore.NewQuery("Bug").
+ Filter("Reporting.ID=", id).
+ Limit(2).
+ GetAll(c, &bugs)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to fetch bugs: %v", err)
+ }
+ if len(bugs) == 0 {
+ return nil, nil, fmt.Errorf("failed to find bug by reporting id %q", id)
+ }
+ if len(bugs) > 1 {
+ return nil, nil, fmt.Errorf("multiple bugs for reporting id %q", id)
+ }
+ return bugs[0], keys[0], nil
+}
+
+// bugReportingByID finds the BugReporting with the given ID.
+// The bool result reports whether it is the final reporting stage.
+// Side effect: every reporting *before* the matched one gets Closed=now
+// (in memory only; it is up to the caller to persist the bug).
+func bugReportingByID(bug *Bug, id string, now time.Time) (*BugReporting, bool) {
+ for i := range bug.Reporting {
+ if bug.Reporting[i].ID == id {
+ return &bug.Reporting[i], i == len(bug.Reporting)-1
+ }
+ bug.Reporting[i].Closed = now
+ }
+ return nil, false
+}
+
+// queryCrashesForBug returns up to limit crashes of the bug, best first:
+// crashes with a C repro, then syz repro, then longer reports, then recency.
+func queryCrashesForBug(c context.Context, bugKey *datastore.Key, limit int) ([]*Crash, error) {
+ var crashes []*Crash
+ _, err := datastore.NewQuery("Crash").
+ Ancestor(bugKey).
+ Order("-ReproC").
+ Order("-ReproSyz").
+ Order("-ReportLen").
+ Order("-Time").
+ Limit(limit).
+ GetAll(c, &crashes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch crashes: %v", err)
+ }
+ return crashes, nil
+}
+
+// findCrashForBug returns the single best crash for the bug (see
+// queryCrashesForBug for the ranking) and logs, but tolerates, any
+// inconsistency between the bug's recorded repro/report level and the crash.
+func findCrashForBug(c context.Context, bug *Bug, bugKey *datastore.Key) (*Crash, error) {
+ crashes, err := queryCrashesForBug(c, bugKey, 1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch crashes: %v", err)
+ }
+ if len(crashes) < 1 {
+ return nil, fmt.Errorf("no crashes")
+ }
+ crash := crashes[0]
+ if bug.ReproLevel == ReproLevelC {
+ if crash.ReproC == 0 {
+ log.Errorf(c, "bug '%v': has C repro, but crash without C repro", bug.Title)
+ }
+ } else if bug.ReproLevel == ReproLevelSyz {
+ if crash.ReproSyz == 0 {
+ log.Errorf(c, "bug '%v': has syz repro, but crash without syz repro", bug.Title)
+ }
+ } else if bug.HasReport {
+ if crash.Report == 0 {
+ log.Errorf(c, "bug '%v': has report, but crash without report", bug.Title)
+ }
+ }
+ return crash, nil
+}
+
+// loadReportingState loads the singleton ReportingState entity (key ID 1).
+// A missing entity is not an error: it yields a fresh zero-value state.
+func loadReportingState(c context.Context) (*ReportingState, error) {
+ state := new(ReportingState)
+ key := datastore.NewKey(c, "ReportingState", "", 1, nil)
+ if err := datastore.Get(c, key, state); err != nil && err != datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("failed to get reporting state: %v", err)
+ }
+ return state, nil
+}
+
+// saveReportingState persists the singleton ReportingState entity (key ID 1).
+func saveReportingState(c context.Context, state *ReportingState) error {
+ key := datastore.NewKey(c, "ReportingState", "", 1, nil)
+ if _, err := datastore.Put(c, key, state); err != nil {
+ return fmt.Errorf("failed to put reporting state: %v", err)
+ }
+ return nil
+}
+
+// getEntry returns the per-day quota entry for (namespace, reporting name),
+// creating it if absent. The Sent counter is reset whenever the entry's date
+// differs from today's. The returned pointer aliases state.Entries, so callers
+// mutate the state in place. Panics on empty namespace/name (programmer error).
+func (state *ReportingState) getEntry(now time.Time, namespace, name string) *ReportingStateEntry {
+ if namespace == "" || name == "" {
+ panic(fmt.Sprintf("requesting reporting state for %v/%v", namespace, name))
+ }
+ // Convert time to date of the form 20170125.
+ year, month, day := now.Date()
+ date := year*10000 + int(month)*100 + day
+ for i := range state.Entries {
+ ent := &state.Entries[i]
+ if ent.Namespace == namespace && ent.Name == name {
+ if ent.Date != date {
+ ent.Date = date
+ ent.Sent = 0
+ }
+ return ent
+ }
+ }
+ state.Entries = append(state.Entries, ReportingStateEntry{
+ Namespace: namespace,
+ Name: name,
+ Date: date,
+ Sent: 0,
+ })
+ return &state.Entries[len(state.Entries)-1]
+}
+
+// bugReportSorter sorts bugs by priority we want to report them.
+// E.g. we want to report bugs with reproducers before bugs without reproducers.
+// Tie-breakers, in order: better repro, has report, more crashes, older bug.
+type bugReportSorter []*Bug
+
+func (a bugReportSorter) Len() int { return len(a) }
+func (a bugReportSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a bugReportSorter) Less(i, j int) bool {
+ if a[i].ReproLevel != a[j].ReproLevel {
+ return a[i].ReproLevel > a[j].ReproLevel
+ }
+ if a[i].HasReport != a[j].HasReport {
+ return a[i].HasReport
+ }
+ if a[i].NumCrashes != a[j].NumCrashes {
+ return a[i].NumCrashes > a[j].NumCrashes
+ }
+ return a[i].FirstTime.Before(a[j].FirstTime)
+}
diff --git a/dashboard/app/reporting_email.go b/dashboard/app/reporting_email.go
new file mode 100644
index 000000000..131c4faa9
--- /dev/null
+++ b/dashboard/app/reporting_email.go
@@ -0,0 +1,200 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/mail"
+ "text/template"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "github.com/google/syzkaller/pkg/email"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ aemail "google.golang.org/appengine/mail"
+)
+
+// Email reporting interface.
+
+// init registers the email-reporting HTTP handlers:
+// /email_poll is hit by cron; /_ah/mail/ receives inbound App Engine mail.
+func init() {
+ http.HandleFunc("/email_poll", handleEmailPoll)
+ http.HandleFunc("/_ah/mail/", handleIncomingMail)
+}
+
+const emailType = "email"
+
+// EmailConfig is the per-reporting configuration for the email backend.
+type EmailConfig struct {
+ Email string // destination address for bug reports
+ MailMaintainers bool // also CC kernel maintainers (not supported yet)
+}
+
+// Type identifies this config as belonging to the email backend.
+func (cfg *EmailConfig) Type() string {
+ return emailType
+}
+
+// Validate checks the config at load time; it currently rejects
+// MailMaintainers because that feature is not implemented yet.
+func (cfg *EmailConfig) Validate() error {
+ if _, err := mail.ParseAddress(cfg.Email); err != nil {
+ return fmt.Errorf("bad email address %q: %v", cfg.Email, err)
+ }
+ if cfg.MailMaintainers {
+ return fmt.Errorf("mailing maintainers is not supported yet")
+ }
+ return nil
+}
+
+// handleEmailPoll is called by cron and sends emails for new bugs, if any.
+// Replies "OK" on success; on failure logs the error and returns HTTP 500
+// so cron can retry.
+func handleEmailPoll(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ if err := emailPoll(c); err != nil {
+ log.Errorf(c, "%v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Write([]byte("OK"))
+}
+
+// emailPoll fetches pending email-type reports and mails each of them.
+// Per-report failures are logged but do not abort the poll; it always
+// returns nil in the current implementation.
+func emailPoll(c context.Context) error {
+ reports := reportingPoll(c, emailType)
+ for _, rep := range reports {
+ if err := emailReport(c, rep); err != nil {
+ log.Errorf(c, "failed to report: %v", err)
+ }
+ }
+ return nil
+}
+
+// emailReport sends one bug report by email (kernel config and any repros
+// attached) and then records the report as open via incomingCommand.
+func emailReport(c context.Context, rep *dashapi.BugReport) error {
+ cfg := new(EmailConfig)
+ if err := json.Unmarshal(rep.Config, cfg); err != nil {
+ return fmt.Errorf("failed to unmarshal email config: %v", err)
+ }
+ to := []string{cfg.Email}
+ if cfg.MailMaintainers {
+ // EmailConfig.Validate rejects MailMaintainers, so this branch should be
+ // unreachable; the panic is a deliberate safeguard and the append below
+ // is dead code until maintainer mailing is implemented.
+ panic("are you nuts?")
+ to = append(to, rep.Maintainers...)
+ }
+ attachments := []aemail.Attachment{
+ {
+ Name: "config.txt",
+ Data: rep.KernelConfig,
+ },
+ }
+ repro := dashapi.ReproLevelNone
+ if len(rep.ReproC) != 0 {
+ repro = dashapi.ReproLevelC
+ attachments = append(attachments, aemail.Attachment{
+ Name: "repro.c",
+ Data: rep.ReproC,
+ })
+ }
+ if len(rep.ReproSyz) != 0 {
+ repro = dashapi.ReproLevelSyz
+ attachments = append(attachments, aemail.Attachment{
+ Name: "repro.txt",
+ Data: rep.ReproSyz,
+ })
+ }
+ // Embed the bug's reporting ID into the sender address so that replies
+ // can be routed back to the right bug.
+ from, err := email.AddAddrContext(fromAddr(c), rep.ID)
+ if err != nil {
+ return err
+ }
+ if err := sendMailTemplate(c, rep.Title, from, to, attachments, "mail_bug.txt", rep); err != nil {
+ return err
+ }
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ ReproLevel: repro,
+ }
+ // NOTE(review): the reply/ok result is discarded; a failure to record the
+ // report is only visible in logs. Confirm this is intentional.
+ incomingCommand(c, cmd)
+ return nil
+}
+
+// handleIncomingMail is the entry point for incoming emails.
+// Errors are only logged: App Engine would retry delivery on a non-200
+// response, and a malformed email will never parse successfully.
+// TODO: this part is unfinished.
+func handleIncomingMail(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ if err := incomingMail(c, r); err != nil {
+ log.Errorf(c, "%v", err)
+ }
+}
+
+// incomingMail parses an inbound email, maps the embedded command
+// ("upstream"/"invalid") to a bug status update, applies it and emails the
+// outcome back to the sender. Emails without a command are silently ignored.
+func incomingMail(c context.Context, r *http.Request) error {
+ msg, err := email.Parse(r.Body, fromAddr(c))
+ if err != nil {
+ return err
+ }
+ log.Infof(c, "received email: subject '%v', from '%v', cc '%v', msg '%v', bug '%v', cmd '%v'",
+ msg.Subject, msg.From, msg.Cc, msg.MessageID, msg.BugID, msg.Command)
+ var status dashapi.BugStatus
+ switch msg.Command {
+ case "":
+ return nil
+ case "upstream":
+ status = dashapi.BugStatusUpstream
+ case "invalid":
+ status = dashapi.BugStatusInvalid
+ default:
+ return replyTo(c, msg, fmt.Sprintf("unknown command %q", msg.Command), nil)
+ }
+ cmd := &dashapi.BugUpdate{
+ ID: msg.BugID,
+ Status: status,
+ }
+ reply, _ := incomingCommand(c, cmd)
+ return replyTo(c, msg, reply, nil)
+}
+
+// mailTemplates holds all mail_*.txt templates, parsed once at startup
+// (Must panics on a malformed template, surfacing the problem at deploy time).
+var mailTemplates = template.Must(template.New("").ParseGlob("mail_*.txt"))
+
+// sendMailTemplate renders the named mail template with data and sends the
+// result as an email with the given subject, sender, recipients and attachments.
+func sendMailTemplate(c context.Context, subject, from string, to []string,
+ attachments []aemail.Attachment, template string, data interface{}) error {
+ body := new(bytes.Buffer)
+ if err := mailTemplates.ExecuteTemplate(body, template, data); err != nil {
+ return fmt.Errorf("failed to execute %v template: %v", template, err)
+ }
+ msg := &aemail.Message{
+ Sender: from,
+ To: to,
+ Subject: subject,
+ Body: body.String(),
+ Attachments: attachments,
+ }
+ if err := aemail.Send(c, msg); err != nil {
+ return fmt.Errorf("failed to send email: %v", err)
+ }
+ return nil
+}
+
+// replyTo answers msg's sender (CCing the original CC list) with the reply
+// text woven into the quoted original body. The In-Reply-To header keeps the
+// answer in the same email thread; attachment is optional.
+func replyTo(c context.Context, msg *email.Email, reply string, attachment *aemail.Attachment) error {
+ var attachments []aemail.Attachment
+ if attachment != nil {
+ attachments = append(attachments, *attachment)
+ }
+ from, err := email.AddAddrContext(fromAddr(c), msg.BugID)
+ if err != nil {
+ return err
+ }
+ replyMsg := &aemail.Message{
+ Sender: from,
+ To: []string{msg.From},
+ Cc: msg.Cc,
+ Subject: msg.Subject,
+ Body: email.FormReply(msg.Body, reply),
+ Attachments: attachments,
+ Headers: mail.Header{"In-Reply-To": []string{msg.MessageID}},
+ }
+ if err := aemail.Send(c, replyMsg); err != nil {
+ return fmt.Errorf("failed to send email: %v", err)
+ }
+ return nil
+}
+
+// fromAddr builds the bot's sender address from the current App Engine app ID.
+func fromAddr(c context.Context) string {
+ return fmt.Sprintf("syzbot <bot@%v.appspotmail.com>", appengine.AppID(c))
+}
diff --git a/dashboard/app/reporting_external.go b/dashboard/app/reporting_external.go
new file mode 100644
index 000000000..e8f9ea622
--- /dev/null
+++ b/dashboard/app/reporting_external.go
@@ -0,0 +1,49 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package dash
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "golang.org/x/net/context"
+)
+
+// Interface with external reporting systems.
+// The external system is meant to poll for new bugs with apiReportingPoll,
+// and report back bug status updates with apiReportingUpdate.
+
+// ExternalConfig is the per-reporting configuration for an external
+// reporting system; ID doubles as the reporting type used for polling.
+type ExternalConfig struct {
+ ID string
+}
+
+// Type returns the reporting type, which for external systems is the config ID.
+func (cfg *ExternalConfig) Type() string {
+ return cfg.ID
+}
+
+// apiReportingPoll serves dashapi poll requests from an external reporting
+// system, returning the pending reports for the requested type.
+// The ns parameter is currently unused (namespace comes from the bugs themselves).
+func apiReportingPoll(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.PollRequest)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ reports := reportingPoll(c, req.Type)
+ resp := &dashapi.PollResponse{
+ Reports: reports,
+ }
+ return resp, nil
+}
+
+// apiReportingUpdate applies a bug status update submitted by an external
+// reporting system and returns the human-readable outcome plus an OK flag.
+// The ns parameter is currently unused.
+func apiReportingUpdate(c context.Context, ns string, r *http.Request) (interface{}, error) {
+ req := new(dashapi.BugUpdate)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal request: %v", err)
+ }
+ reply, ok := incomingCommand(c, req)
+ return &dashapi.BugUpdateReply{
+ OK: ok,
+ Text: reply,
+ }, nil
+}
diff --git a/dashboard/app/reporting_test.go b/dashboard/app/reporting_test.go
new file mode 100644
index 000000000..58709ff1f
--- /dev/null
+++ b/dashboard/app/reporting_test.go
@@ -0,0 +1,386 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// +build aetest
+
+package dash
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+)
+
+// TestReportBug walks one bug through the full reporting pipeline:
+// poll (type filtering), report contents, open update, upstreaming to the
+// second reporting, and rejection of closing the final reporting.
+func TestReportBug(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ crash1 := &dashapi.Crash{
+ BuildID: "build1",
+ Title: "title1",
+ Maintainers: []string{`"Foo Bar" <foo@bar.com>`, `bar@foo.com`},
+ Log: []byte("log1"),
+ Report: []byte("report1"),
+ ReproOpts: []byte("some opts"),
+ ReproSyz: []byte("getpid()"),
+ ReproC: []byte("int main() {}"),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ // Must get no reports for "unknown" type.
+ pr := &dashapi.PollRequest{
+ Type: "unknown",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 0)
+
+ // Must get a proper report for "test" type.
+ pr.Type = "test"
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ rep := resp.Reports[0]
+ if rep.ID == "" {
+ t.Fatalf("empty report ID")
+ }
+ want := &dashapi.BugReport{
+ Config: []byte(`{"Index":1}`),
+ ID: rep.ID,
+ Title: "title1",
+ Maintainers: []string{`"Foo Bar" <foo@bar.com>`, `bar@foo.com`},
+ CompilerID: "compiler1",
+ KernelRepo: "repo1",
+ KernelBranch: "branch1",
+ KernelCommit: "kernel_commit1",
+ KernelConfig: []byte("config1"),
+ Log: []byte("log1"),
+ Report: []byte("report1"),
+ ReproC: []byte("int main() {}"),
+ ReproSyz: []byte("#some opts\ngetpid()"),
+ }
+ c.expectEQ(rep, want)
+
+ // Since we did not update bug status yet, should get the same report again.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ c.expectEQ(resp.Reports[0], want)
+
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ ReproLevel: dashapi.ReproLevelC,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // After bug update should not get the report again.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 0)
+
+ // Now close the bug in the first reporting.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusUpstream,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Check that bug updates for the first reporting fail now.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, false)
+
+ // Check that we get the report in the second reporting.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ rep2 := resp.Reports[0]
+ if rep2.ID == "" || rep2.ID == rep.ID {
+ t.Fatalf("bad report ID: %q", rep2.ID)
+ }
+ want.ID = rep2.ID
+ want.Config = []byte(`{"Index":2}`)
+ c.expectEQ(rep2, want)
+
+ // Check that that we can't upstream the bug in the final reporting.
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusUpstream,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, false)
+}
+
+// TestInvalidBug checks that a bug closed as invalid stops being reported,
+// and that a recurrence of the same crash creates a fresh "title (2)" bug
+// that is reported from the first reporting stage again.
+func TestInvalidBug(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ crash1 := testCrash(build, 1)
+ crash1.ReproC = []byte("int main() {}")
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ rep := resp.Reports[0]
+ c.expectEQ(rep.Title, "title1")
+
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ ReproLevel: dashapi.ReproLevelC,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Mark the bug as invalid.
+ cmd = &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusInvalid,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Now it should not be reported in either reporting.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 0)
+
+ // Now a similar crash happens again.
+ crash2 := &dashapi.Crash{
+ BuildID: "build1",
+ Title: "title1",
+ Log: []byte("log2"),
+ Report: []byte("report2"),
+ ReproC: []byte("int main() { return 1; }"),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+
+ // Now it should be reported again.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ rep = resp.Reports[0]
+ if rep.ID == "" {
+ t.Fatalf("empty report ID")
+ }
+ want := &dashapi.BugReport{
+ Config: []byte(`{"Index":1}`),
+ ID: rep.ID,
+ Title: "title1 (2)",
+ CompilerID: "compiler1",
+ KernelRepo: "repo1",
+ KernelBranch: "branch1",
+ KernelCommit: "kernel_commit1",
+ KernelConfig: []byte("config1"),
+ Log: []byte("log2"),
+ Report: []byte("report2"),
+ ReproC: []byte("int main() { return 1; }"),
+ }
+ c.expectEQ(rep, want)
+}
+
+// TestReportingQuota checks the daily reporting limit: with 8 pending bugs
+// and a quota of 3/day, successive days must yield 3, 3, 2 and then 0 reports.
+func TestReportingQuota(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ const numReports = 8 // quota is 3 per day
+ for i := 0; i < numReports; i++ {
+ crash := &dashapi.Crash{
+ BuildID: "build1",
+ Title: fmt.Sprintf("title%v", i),
+ Log: []byte(fmt.Sprintf("log%v", i)),
+ Report: []byte(fmt.Sprintf("report%v", i)),
+ }
+ c.expectOK(c.API(client1, key1, "report_crash", crash, nil))
+ }
+
+ for _, reports := range []int{3, 3, 2, 0, 0} {
+ c.advanceTime(24 * time.Hour)
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), reports)
+ for _, rep := range resp.Reports {
+ cmd := &dashapi.BugUpdate{
+ ID: rep.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+ }
+ // Out of quota for today, so must get 0 reports.
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 0)
+ }
+}
+
+// Basic dup scenario: mark one bug as dup of another.
+// Also covers undup, re-dup, suppression of new bugs for the dup while the
+// original is open, and resurrection once the original is closed.
+func TestReportingDup(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ crash1 := testCrash(build, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ crash2 := testCrash(build, 2)
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 2)
+
+ rep1 := resp.Reports[0]
+ cmd := &dashapi.BugUpdate{
+ ID: rep1.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ rep2 := resp.Reports[1]
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Dup.
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusDup,
+ DupOf: rep1.ID,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Undup.
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Dup again.
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusDup,
+ DupOf: rep1.ID,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Dup crash happens again, new bug must not be created.
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 0)
+
+ // Now close the original bug, and check that new bugs for dup are now created.
+ cmd = &dashapi.BugUpdate{
+ ID: rep1.ID,
+ Status: dashapi.BugStatusInvalid,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ c.expectEQ(resp.Reports[0].Title, crash2.Title+" (2)")
+}
+
+// Test that marking dups across reporting levels is not permitted.
+// Every pairing (including self-dup) across the three reportings must be rejected.
+func TestReportingDupCrossReporting(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ build := testBuild(1)
+ c.expectOK(c.API(client1, key1, "upload_build", build, nil))
+
+ crash1 := testCrash(build, 1)
+ c.expectOK(c.API(client1, key1, "report_crash", crash1, nil))
+
+ crash2 := testCrash(build, 2)
+ c.expectOK(c.API(client1, key1, "report_crash", crash2, nil))
+
+ pr := &dashapi.PollRequest{
+ Type: "test",
+ }
+ resp := new(dashapi.PollResponse)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 2)
+
+ rep1 := resp.Reports[0]
+ cmd := &dashapi.BugUpdate{
+ ID: rep1.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ reply := new(dashapi.BugUpdateReply)
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ rep2 := resp.Reports[1]
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusOpen,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+
+ // Upstream second bug.
+ cmd = &dashapi.BugUpdate{
+ ID: rep2.ID,
+ Status: dashapi.BugStatusUpstream,
+ }
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, true)
+ c.expectOK(c.API(client1, key1, "reporting_poll", pr, resp))
+ c.expectEQ(len(resp.Reports), 1)
+ rep3 := resp.Reports[0]
+
+ // Duping must fail all ways.
+ cmds := []*dashapi.BugUpdate{
+ &dashapi.BugUpdate{ID: rep1.ID, DupOf: rep1.ID},
+ &dashapi.BugUpdate{ID: rep1.ID, DupOf: rep2.ID},
+ &dashapi.BugUpdate{ID: rep1.ID, DupOf: rep3.ID},
+ &dashapi.BugUpdate{ID: rep2.ID, DupOf: rep1.ID},
+ &dashapi.BugUpdate{ID: rep2.ID, DupOf: rep2.ID},
+ &dashapi.BugUpdate{ID: rep2.ID, DupOf: rep3.ID},
+ &dashapi.BugUpdate{ID: rep3.ID, DupOf: rep1.ID},
+ &dashapi.BugUpdate{ID: rep3.ID, DupOf: rep2.ID},
+ &dashapi.BugUpdate{ID: rep3.ID, DupOf: rep3.ID},
+ }
+ for _, cmd := range cmds {
+ t.Logf("duping %v -> %v", cmd.ID, cmd.DupOf)
+ cmd.Status = dashapi.BugStatusDup
+ c.expectOK(c.API(client1, key1, "reporting_update", cmd, reply))
+ c.expectEQ(reply.OK, false)
+ }
+}
diff --git a/dashboard/app/static/favicon.ico b/dashboard/app/static/favicon.ico
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/dashboard/app/static/favicon.ico
diff --git a/dashboard/app/static/style.css b/dashboard/app/static/style.css
new file mode 100644
index 000000000..07f742485
--- /dev/null
+++ b/dashboard/app/static/style.css
@@ -0,0 +1,122 @@
/* Top navigation bar shown on every dashboard page. */
#topbar {
	padding: 5px 10px;
	background: #E0EBF5;
}

#topbar a {
	color: #375EAB;
	text-decoration: none;
}

/* Headings share the topbar link color for a consistent theme. */
h1, h2, h3, h4 {
	margin: 0;
	padding: 0;
	color: #375EAB;
	font-weight: bold;
}

/* Default table styling; cells clip overflowing text with an ellipsis. */
table {
	border: 1px solid #ccc;
	margin: 20px 5px;
	border-collapse: collapse;
	white-space: nowrap;
	text-overflow: ellipsis;
	overflow: hidden;
}

table caption {
	font-weight: bold;
}

table td, table th {
	vertical-align: top;
	padding: 2px 8px;
	text-overflow: ellipsis;
	overflow: hidden;
}

/* Borderless full-width table used purely for page layout. */
.position_table {
	border: 0px;
	margin: 0px;
	width: 100%;
	border-collapse: collapse;
}

.position_table td, .position_table tr {
	vertical-align: center;
	padding: 0px;
}

.position_table .search {
	text-align: right;
}

/* Data listing tables: zebra striping, hover highlight, fixed-width columns. */
.list_table td, .list_table th {
	border-left: 1px solid #ccc;
}

.list_table th {
	background: #F4F4F4;
}

.list_table tr:nth-child(2n+1) {
	background: #F4F4F4;
}

.list_table tr:hover {
	background: #ffff99;
}

/* Per-column widths for the bug listing tables. */
.list_table .namespace {
	width: 100pt;
	max-width: 100pt;
}

.list_table .title {
	width: 400pt;
	max-width: 400pt;
}

.list_table .count {
	text-align: right;
}

.list_table .tag {
	font-family: monospace;
	font-size: 8pt;
	width: 200pt;
	max-width: 200pt;
}

.list_table .opts {
	width: 40pt;
	max-width: 40pt;
}

.list_table .status {
	width: 300pt;
	max-width: 300pt;
}

.list_table .commits {
	width: 200pt;
	max-width: 200pt;
}

.list_table .maintainers {
	width: 300pt;
	max-width: 300pt;
}

/* Buttons styled to match the topbar palette. */
.button {
	color: #222;
	border: 1px solid #375EAB;
	background: #E0EBF5;
	border-radius: 3px;
	cursor: pointer;
	margin-left: 10px;
}

textarea {
	width:100%;
}
diff --git a/dashboard/app/util_test.go b/dashboard/app/util_test.go
new file mode 100644
index 000000000..8666dbabf
--- /dev/null
+++ b/dashboard/app/util_test.go
@@ -0,0 +1,196 @@
+// Copyright 2017 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+// The test uses aetest package that starts local dev_appserver and handles all requests locally:
+// https://cloud.google.com/appengine/docs/standard/go/tools/localunittesting/reference
+// The test requires installed appengine SDK (dev_appserver), so we guard it by aetest tag.
+// Run the test with: goapp test -tags=aetest
+
+// +build aetest
+
+package dash
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/aetest"
+ "google.golang.org/appengine/user"
+)
+
// Ctx is the per-test context: a local aetest app instance plus a mocked
// clock. Each test creates its own Ctx via NewCtx and releases it with Close.
type Ctx struct {
	t          *testing.T
	inst       aetest.Instance
	mockedTime time.Time // virtual clock returned by the mocked timeNow (see init)
}
+
+func NewCtx(t *testing.T) *Ctx {
+ t.Parallel()
+ inst, err := aetest.NewInstance(&aetest.Options{
+ // Without this option datastore queries return data with slight delay,
+ // which fails reporting tests.
+ StronglyConsistentDatastore: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &Ctx{
+ t: t,
+ inst: inst,
+ mockedTime: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ }
+ return c
+}
+
+func (c *Ctx) expectOK(err error) {
+ if err != nil {
+ c.t.Fatalf("\n%v: %v", caller(0), err)
+ }
+}
+
+func (c *Ctx) expectFail(msg string, err error) {
+ if err == nil {
+ c.t.Fatal("\n%v: expected to fail, but it does not", caller(0))
+ }
+ if !strings.Contains(err.Error(), msg) {
+ c.t.Fatalf("\n%v: expected to fail with %q, but failed with %q", caller(0), msg, err)
+ }
+}
+
+func (c *Ctx) expectEQ(got, want interface{}) {
+ if !reflect.DeepEqual(got, want) {
+ c.t.Fatalf("\n%v: got %#v, want %#v", caller(0), got, want)
+ }
+}
+
+func caller(skip int) string {
+ _, file, line, _ := runtime.Caller(skip + 2)
+ return fmt.Sprintf("%v:%v", filepath.Base(file), line)
+}
+
// Close releases the test app instance.
// If the test has not failed, it first renders the main page so that a
// broken final datastore state is caught, then drops the time mapping
// before shutting the instance down.
func (c *Ctx) Close() {
	if !c.t.Failed() {
		// Ensure that we can render bugs in the final test state.
		c.expectOK(c.GET("/"))
	}
	unregisterContext(c)
	c.inst.Close()
}
+
// advanceTime moves the mocked clock forward by d; subsequent requests
// from this Ctx observe the new time via the mocked timeNow.
func (c *Ctx) advanceTime(d time.Duration) {
	c.mockedTime = c.mockedTime.Add(d)
}
+
// API makes an api request to the app from the specified client.
// The request is served in-process by http.DefaultServeMux; req and reply
// are marshaled/unmarshaled by dashapi.Query. Returns the error from the
// query, logging both request and reply for debugging.
func (c *Ctx) API(client, key, method string, req, reply interface{}) error {
	doer := func(r *http.Request) (*http.Response, error) {
		// Associate the request's appengine context with this Ctx so the
		// mocked timeNow can return c.mockedTime for it.
		registerContext(r, c)
		w := httptest.NewRecorder()
		http.DefaultServeMux.ServeHTTP(w, r)
		// Later versions of Go have a nice w.Result method,
		// but we stuck on 1.6 on appengine.
		if w.Body == nil {
			w.Body = new(bytes.Buffer)
		}
		res := &http.Response{
			StatusCode: w.Code,
			Status:     http.StatusText(w.Code),
			Body:       ioutil.NopCloser(bytes.NewReader(w.Body.Bytes())),
		}
		return res, nil
	}

	c.t.Logf("API(%v): %#v", method, req)
	err := dashapi.Query(client, "", key, method, c.inst.NewRequest, doer, req, reply)
	if err != nil {
		c.t.Logf("ERROR: %v", err)
		return err
	}
	c.t.Logf("REPLY: %#v", reply)
	return nil
}
+
+// GET sends authorized HTTP GET request to the app.
+func (c *Ctx) GET(url string) error {
+ c.t.Logf("GET: %v", url)
+ r, err := c.inst.NewRequest("GET", url, nil)
+ if err != nil {
+ c.t.Fatal(err)
+ }
+ registerContext(r, c)
+ user := &user.User{
+ Email: "test@syzkaller.com",
+ AuthDomain: "gmail.com",
+ Admin: true,
+ }
+ aetest.Login(user, r)
+ w := httptest.NewRecorder()
+ http.DefaultServeMux.ServeHTTP(w, r)
+ c.t.Logf("REPLY: %v", w.Code)
+ if w.Code != http.StatusOK {
+ return fmt.Errorf("%v", w.Body.String())
+ }
+ return nil
+}
+
// Mock time as some functionality relies on real time.
// timeNow is the app's injectable clock; here it is replaced with a
// function that looks up the Ctx owning the request's context and
// returns that Ctx's mocked time.
func init() {
	timeNow = func(c context.Context) time.Time {
		return getRequestContext(c).mockedTime
	}
}
+
// Machinery to associate mocked time with requests.
// RequestMapping links the appengine context created for one request
// with the test Ctx that issued the request.
type RequestMapping struct {
	c   context.Context
	ctx *Ctx
}
+
var (
	requestMu sync.Mutex
	// requestContexts holds all live context->Ctx mappings; guarded by requestMu.
	requestContexts []RequestMapping
)
+
+func registerContext(r *http.Request, c *Ctx) {
+ requestMu.Lock()
+ defer requestMu.Unlock()
+ requestContexts = append(requestContexts, RequestMapping{appengine.NewContext(r), c})
+}
+
// getRequestContext returns the test Ctx that registered context c.
// Contexts are matched with reflect.DeepEqual rather than ==,
// presumably because appengine contexts do not compare directly —
// NOTE(review): verify against the appengine context implementation.
// Panics if c was never registered, which indicates a request that
// bypassed registerContext.
func getRequestContext(c context.Context) *Ctx {
	requestMu.Lock()
	defer requestMu.Unlock()
	for _, m := range requestContexts {
		if reflect.DeepEqual(c, m.c) {
			return m.ctx
		}
	}
	panic(fmt.Sprintf("no context for: %#v", c))
}
+
+func unregisterContext(c *Ctx) {
+ requestMu.Lock()
+ defer requestMu.Unlock()
+ n := 0
+ for _, m := range requestContexts {
+ if m.ctx == c {
+ continue
+ }
+ requestContexts[n] = m
+ n++
+ }
+ requestContexts = requestContexts[:n]
+}