about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- dashboard/app/app_test.go   |  1
-rw-r--r-- dashboard/app/cache.go      | 75
-rw-r--r-- dashboard/app/cache_test.go | 79
-rw-r--r-- dashboard/app/config.go     |  2
-rw-r--r-- dashboard/app/cron.yaml     |  2
-rw-r--r-- dashboard/app/main.go       | 27
6 files changed, 180 insertions(+), 6 deletions(-)
diff --git a/dashboard/app/app_test.go b/dashboard/app/app_test.go
index 1ebabe04f..1a9cc5150 100644
--- a/dashboard/app/app_test.go
+++ b/dashboard/app/app_test.go
@@ -275,6 +275,7 @@ var testConfig = &GlobalConfig{
},
},
FindBugOriginTrees: true,
+ CacheUIPages: true,
},
"access-public-email": {
AccessLevel: AccessPublic,
diff --git a/dashboard/app/cache.go b/dashboard/app/cache.go
index b9ed0d54f..4e77d55cf 100644
--- a/dashboard/app/cache.go
+++ b/dashboard/app/cache.go
@@ -4,10 +4,12 @@
package main
import (
+ "encoding/json"
"fmt"
"net/http"
"time"
+ "github.com/google/syzkaller/pkg/image"
"golang.org/x/net/context"
"google.golang.org/appengine/v2"
"google.golang.org/appengine/v2/log"
@@ -48,6 +50,8 @@ func CacheGet(c context.Context, r *http.Request, ns string) (*Cached, error) {
return buildAndStoreCached(c, bugs, backports, ns, accessLevel)
}
+var cacheAccessLevels = []AccessLevel{AccessPublic, AccessUser, AccessAdmin}
+
// cacheUpdate updates memcache every hour (called by cron.yaml).
// Cache update is slow and we don't want to slow down user requests.
func cacheUpdate(w http.ResponseWriter, r *http.Request) {
@@ -63,7 +67,7 @@ func cacheUpdate(w http.ResponseWriter, r *http.Request) {
log.Errorf(c, "failed load ns=%v bugs: %v", ns, err)
continue
}
- for _, accessLevel := range []AccessLevel{AccessPublic, AccessUser, AccessAdmin} {
+ for _, accessLevel := range cacheAccessLevels {
_, err := buildAndStoreCached(c, bugs, backports, ns, accessLevel)
if err != nil {
log.Errorf(c, "failed to build cached for ns=%v access=%v: %v", ns, accessLevel, err)
@@ -104,6 +108,7 @@ func buildAndStoreCached(c context.Context, bugs []*Bug, backports []*rawBackpor
}
}
}
+
item := &memcache.Item{
Key: cacheKey(ns, accessLevel),
Object: v,
@@ -133,3 +138,71 @@ func (c *CachedBugStats) Record(bug *Bug) {
func cacheKey(ns string, accessLevel AccessLevel) string {
return fmt.Sprintf("%v-%v", ns, accessLevel)
}
+
+func CachedBugGroups(c context.Context, ns string, accessLevel AccessLevel) ([]*uiBugGroup, error) {
+ item, err := memcache.Get(c, cachedBugGroupsKey(ns, accessLevel))
+ if err == memcache.ErrCacheMiss {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ jsonData, destructor := image.MustDecompress(item.Value)
+ defer destructor()
+
+ var ret []*uiBugGroup
+ err = json.Unmarshal(jsonData, &ret)
+ return ret, err
+}
+
+func cachedBugGroupsKey(ns string, accessLevel AccessLevel) string {
+ return fmt.Sprintf("%v-%v-bug-groups", ns, accessLevel)
+}
+
+// minuteCacheUpdate updates memcache every minute (called by cron.yaml).
+func handleMinuteCacheUpdate(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ for ns, nsConfig := range getConfig(c).Namespaces {
+ if !nsConfig.CacheUIPages {
+ continue
+ }
+ err := minuteCacheNsUpdate(c, ns)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("bug groups cache update for %s failed: %v", ns, err),
+ http.StatusInternalServerError)
+ return
+ }
+ }
+}
+
+func minuteCacheNsUpdate(c context.Context, ns string) error {
+ bugs, err := loadVisibleBugs(c, ns, nil)
+ if err != nil {
+ return err
+ }
+ managers, err := managerList(c, ns)
+ if err != nil {
+ return err
+ }
+ for _, accessLevel := range cacheAccessLevels {
+ groups, err := prepareBugGroups(c, bugs, managers, accessLevel, ns)
+ if err != nil {
+ return fmt.Errorf("failed to fetch groups: %w", err)
+ }
+ encoded, err := json.Marshal(groups)
+ if err != nil {
+ return fmt.Errorf("failed to marshal: %w", err)
+ }
+ item := &memcache.Item{
+ Key: cachedBugGroupsKey(ns, accessLevel),
+ // The resulting blob can be quite big, so let's compress.
+ Value: image.Compress(encoded),
+ Expiration: 2 * time.Minute, // supposed to be updated by cron every minute
+ }
+ if err := memcache.Set(c, item); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/dashboard/app/cache_test.go b/dashboard/app/cache_test.go
new file mode 100644
index 000000000..db4e1a809
--- /dev/null
+++ b/dashboard/app/cache_test.go
@@ -0,0 +1,79 @@
+// Copyright 2023 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "testing"
+
+ "github.com/google/syzkaller/dashboard/dashapi"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCachedBugGroups(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ client := c.makeClient(clientPublic, keyPublic, true)
+ build := testBuild(1)
+ client.UploadBuild(build)
+
+ // Bug at the first (AccessUser) stage of reporting.
+ crash := testCrash(build, 1)
+ crash.Title = "user-visible bug"
+ client.ReportCrash(crash)
+ client.pollBug()
+
+ // Bug at the second (AccessPublic) stage.
+ crash2 := testCrash(build, 2)
+ crash2.Title = "public-visible bug"
+ client.ReportCrash(crash2)
+ client.updateBug(client.pollBug().ID, dashapi.BugStatusUpstream, "")
+ client.pollBug()
+
+ // Add a build in a separate namespace (to check it's not mixed in).
+ client2 := c.makeClient(clientPublicEmail2, keyPublicEmail2, true)
+ build2 := testBuild(2)
+ client2.UploadBuild(build2)
+ client2.ReportCrash(testCrash(build2, 1))
+ client2.pollEmailBug()
+
+ // Output before caching.
+ before := map[AccessLevel][]*uiBugGroup{}
+ for _, accessLevel := range []AccessLevel{AccessPublic, AccessUser} {
+ orig, err := fetchNamespaceBugs(c.ctx, accessLevel, "access-public", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.NotNil(t, orig)
+ before[accessLevel] = orig
+ }
+
+ // Update cache.
+ _, err := c.AuthGET(AccessAdmin, "/cron/minute_cache_update")
+ c.expectOK(err)
+
+ // Now query the groups from cache.
+ for _, accessLevel := range []AccessLevel{AccessPublic, AccessUser} {
+ cached, err := CachedBugGroups(c.ctx, "access-public", accessLevel)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, before[accessLevel], cached)
+ // Ensure that the web dashboard page loads after cache is set.
+ _, err = c.AuthGET(accessLevel, "/access-public")
+ c.expectOK(err)
+ }
+}
+
+// Ensure we can serve pages with empty cache.
+func TestBugListWithoutCache(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ assert.True(t, getNsConfig(c.ctx, "access-public").CacheUIPages)
+ for _, accessLevel := range []AccessLevel{AccessPublic, AccessUser, AccessAdmin} {
+ _, err := c.AuthGET(accessLevel, "/access-public")
+ c.expectOK(err)
+ }
+}
diff --git a/dashboard/app/config.go b/dashboard/app/config.go
index da31cdac5..7a901a181 100644
--- a/dashboard/app/config.go
+++ b/dashboard/app/config.go
@@ -114,6 +114,8 @@ type Config struct {
Subsystems SubsystemsConfig
// Instead of Last acitivity, display Discussions on the main page.
DisplayDiscussions bool
+ // Cache what we display on the web dashboard.
+ CacheUIPages bool
}
// DiscussionEmailConfig defines the correspondence between an email and a DiscussionSource.
diff --git a/dashboard/app/cron.yaml b/dashboard/app/cron.yaml
index 3813ae9de..76d27954c 100644
--- a/dashboard/app/cron.yaml
+++ b/dashboard/app/cron.yaml
@@ -6,6 +6,8 @@ cron:
schedule: every 1 minutes
- url: /cron/cache_update
schedule: every 1 hours
+- url: /cron/minute_cache_update
+ schedule: every 1 minutes
- url: /cron/deprecate_assets
schedule: every 1 hours
- url: /cron/kcidb_poll
diff --git a/dashboard/app/main.go b/dashboard/app/main.go
index 9fa76c6e4..18e43a897 100644
--- a/dashboard/app/main.go
+++ b/dashboard/app/main.go
@@ -69,6 +69,7 @@ func initHTTPHandlers() {
http.Handle("/"+ns+"/s/", handlerWrapper(handleSubsystemPage))
}
http.HandleFunc("/cron/cache_update", cacheUpdate)
+ http.HandleFunc("/cron/minute_cache_update", handleMinuteCacheUpdate)
http.HandleFunc("/cron/deprecate_assets", handleDeprecateAssets)
http.HandleFunc("/cron/refresh_subsystems", handleRefreshSubsystems)
http.HandleFunc("/cron/subsystem_reports", handleSubsystemReports)
@@ -1503,15 +1504,29 @@ func fetchFixPendingBugs(c context.Context, ns, manager string) ([]*Bug, error)
func fetchNamespaceBugs(c context.Context, accessLevel AccessLevel, ns string,
filter *userBugFilter) ([]*uiBugGroup, error) {
- bugs, err := loadVisibleBugs(c, accessLevel, ns, filter)
+ if !filter.Any() && getNsConfig(c, ns).CacheUIPages {
+ // If there's no filter, try to fetch data from cache.
+ cached, err := CachedBugGroups(c, ns, accessLevel)
+ if err != nil {
+ log.Errorf(c, "failed to fetch from bug groups cache: %v", err)
+ } else if cached != nil {
+ return cached, nil
+ }
+ }
+ bugs, err := loadVisibleBugs(c, ns, filter)
if err != nil {
return nil, err
}
- state, err := loadReportingState(c)
+ managers, err := managerList(c, ns)
if err != nil {
return nil, err
}
- managers, err := managerList(c, ns)
+ return prepareBugGroups(c, bugs, managers, accessLevel, ns)
+}
+
+func prepareBugGroups(c context.Context, bugs []*Bug, managers []string,
+ accessLevel AccessLevel, ns string) ([]*uiBugGroup, error) {
+ state, err := loadReportingState(c)
if err != nil {
return nil, err
}
@@ -1579,8 +1594,7 @@ func fetchNamespaceBugs(c context.Context, accessLevel AccessLevel, ns string,
return uiGroups, nil
}
-func loadVisibleBugs(c context.Context, accessLevel AccessLevel, ns string,
- bugFilter *userBugFilter) ([]*Bug, error) {
+func loadVisibleBugs(c context.Context, ns string, bugFilter *userBugFilter) ([]*Bug, error) {
// Load open and dup bugs in in 2 separate queries.
// Ideally we load them in one query with a suitable filter,
// but unfortunately status values don't allow one query (<BugStatusFixed || >BugStatusInvalid).
@@ -1673,6 +1687,9 @@ func fetchTerminalBugs(c context.Context, accessLevel AccessLevel,
}
func applyBugFilter(query *db.Query, filter *userBugFilter) *db.Query {
+ if filter == nil {
+ return query
+ }
manager := filter.ManagerName()
if len(filter.Labels) > 0 {
// Take just the first one.