aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2026-02-19 13:10:31 +0100
committerDmitry Vyukov <dvyukov@google.com>2026-02-19 15:25:05 +0000
commitc8d8c52d6e4d7bc5716f7e5848dad2de4aea55e1 (patch)
treed1e09b4292614b320528630a8e62d1d9681c228a
parent73a252ac8d271a17af0607da173d8821b8ce4311 (diff)
dashboard/app: fix API access checks
Currently we permit per-namespace clients to access global APIs. For example, a secondary OS syz-manager client can poll bugs from all namespaces. This is wrong and bad. Don't permit namespace clients to access global APIs.
-rw-r--r--dashboard/app/access_test.go38
-rw-r--r--dashboard/app/ai_test.go28
-rw-r--r--dashboard/app/api.go80
-rw-r--r--dashboard/app/api_test.go27
-rw-r--r--dashboard/app/app_test.go22
-rw-r--r--dashboard/app/asset_storage_test.go20
-rw-r--r--dashboard/app/bisect_test.go92
-rw-r--r--dashboard/app/cache_test.go6
-rw-r--r--dashboard/app/commit_poll_test.go6
-rw-r--r--dashboard/app/discussion_test.go12
-rw-r--r--dashboard/app/email_test.go2
-rw-r--r--dashboard/app/fix_test.go92
-rw-r--r--dashboard/app/jobs_test.go178
-rw-r--r--dashboard/app/linux_reporting_test.go16
-rw-r--r--dashboard/app/main_test.go22
-rw-r--r--dashboard/app/notifications_test.go14
-rw-r--r--dashboard/app/public_json_api_test.go16
-rw-r--r--dashboard/app/reporting_test.go258
-rw-r--r--dashboard/app/repro_test.go30
-rw-r--r--dashboard/app/subsystem_test.go20
-rw-r--r--dashboard/app/tree_test.go58
-rw-r--r--dashboard/app/util_test.go4
22 files changed, 548 insertions, 493 deletions
diff --git a/dashboard/app/access_test.go b/dashboard/app/access_test.go
index 440f81f9b..f8a71ecdd 100644
--- a/dashboard/app/access_test.go
+++ b/dashboard/app/access_test.go
@@ -305,12 +305,12 @@ func TestAccess(t *testing.T) {
crashInvalid := testCrashWithRepro(build, reportingIdx*10+0)
client.ReportCrash(crashInvalid)
- repInvalid := client.pollBug()
+ repInvalid := c.globalClient.pollBug()
if reportingIdx != 0 {
- client.updateBug(repInvalid.ID, dashapi.BugStatusUpstream, "")
- repInvalid = client.pollBug()
+ c.globalClient.updateBug(repInvalid.ID, dashapi.BugStatusUpstream, "")
+ repInvalid = c.globalClient.pollBug()
}
- client.updateBug(repInvalid.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(repInvalid.ID, dashapi.BugStatusInvalid, "")
// Invalid bugs become visible up to the last reporting.
finalLevel := c.config().Namespaces[ns].
Reporting[len(c.config().Namespaces[ns].Reporting)-1].AccessLevel
@@ -318,12 +318,12 @@ func TestAccess(t *testing.T) {
crashFixed := testCrashWithRepro(build, reportingIdx*10+0)
client.ReportCrash(crashFixed)
- repFixed := client.pollBug()
+ repFixed := c.globalClient.pollBug()
if reportingIdx != 0 {
- client.updateBug(repFixed.ID, dashapi.BugStatusUpstream, "")
- repFixed = client.pollBug()
+ c.globalClient.updateBug(repFixed.ID, dashapi.BugStatusUpstream, "")
+ repFixed = c.globalClient.pollBug()
}
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: repFixed.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{ns + "-patch0"},
@@ -354,21 +354,21 @@ func TestAccess(t *testing.T) {
},
}
client.ReportCrash(crashOpen)
- repOpen := client.pollBug()
+ repOpen := c.globalClient.pollBug()
if reportingIdx != 0 {
- client.updateBug(repOpen.ID, dashapi.BugStatusUpstream, "")
- repOpen = client.pollBug()
+ c.globalClient.updateBug(repOpen.ID, dashapi.BugStatusUpstream, "")
+ repOpen = c.globalClient.pollBug()
}
noteBugAccessLevel(repOpen.ID, accessLevel, nsLevel)
crashPatched := testCrashWithRepro(build, reportingIdx*10+1)
client.ReportCrash(crashPatched)
- repPatched := client.pollBug()
+ repPatched := c.globalClient.pollBug()
if reportingIdx != 0 {
- client.updateBug(repPatched.ID, dashapi.BugStatusUpstream, "")
- repPatched = client.pollBug()
+ c.globalClient.updateBug(repPatched.ID, dashapi.BugStatusUpstream, "")
+ repPatched = c.globalClient.pollBug()
}
- reply, _ = client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: repPatched.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{ns + "-patch0"},
@@ -381,12 +381,12 @@ func TestAccess(t *testing.T) {
crashDup := testCrashWithRepro(build, reportingIdx*10+2)
client.ReportCrash(crashDup)
- repDup := client.pollBug()
+ repDup := c.globalClient.pollBug()
if reportingIdx != 0 {
- client.updateBug(repDup.ID, dashapi.BugStatusUpstream, "")
- repDup = client.pollBug()
+ c.globalClient.updateBug(repDup.ID, dashapi.BugStatusUpstream, "")
+ repDup = c.globalClient.pollBug()
}
- client.updateBug(repDup.ID, dashapi.BugStatusDup, repOpen.ID)
+ c.globalClient.updateBug(repDup.ID, dashapi.BugStatusDup, repOpen.ID)
noteBugAccessLevel(repDup.ID, accessLevel, nsLevel)
}
}
diff --git a/dashboard/app/ai_test.go b/dashboard/app/ai_test.go
index dce064b96..351ef9566 100644
--- a/dashboard/app/ai_test.go
+++ b/dashboard/app/ai_test.go
@@ -67,7 +67,7 @@ func TestAIBugWorkflows(t *testing.T) {
requireWorkflows(kcsanBug, nil)
requireWorkflows(kasanBug, nil)
- _, err := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ _, err := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: "patching", Name: "patching"},
@@ -80,7 +80,7 @@ func TestAIBugWorkflows(t *testing.T) {
// This should make patching-foo inactive.
c.advanceTime(2 * 24 * time.Hour)
- _, err = c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ _, err = c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: "patching", Name: "patching"},
@@ -91,7 +91,7 @@ func TestAIBugWorkflows(t *testing.T) {
})
require.NoError(t, err)
- _, err = c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ _, err = c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: "patching", Name: "patching"},
@@ -118,7 +118,7 @@ func TestAIJob(t *testing.T) {
c.aiClient.ReportCrash(crash)
c.aiClient.pollEmailBug()
- resp, err := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ resp, err := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: "assessment-kcsan", Name: "assessment-kcsan"},
@@ -137,7 +137,7 @@ func TestAIJob(t *testing.T) {
"ReproOpts": "",
})
- resp2, err2 := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ resp2, err2 := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: "assessment-kcsan", Name: "assessment-kcsan"},
@@ -146,7 +146,7 @@ func TestAIJob(t *testing.T) {
require.NoError(t, err2)
require.Equal(t, resp2.ID, "")
- require.NoError(t, c.aiClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
+ require.NoError(t, c.globalClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
JobID: resp.ID,
Span: &trajectory.Span{
Seq: 0,
@@ -156,7 +156,7 @@ func TestAIJob(t *testing.T) {
},
}))
- require.NoError(t, c.aiClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
+ require.NoError(t, c.globalClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
JobID: resp.ID,
Span: &trajectory.Span{
Seq: 1,
@@ -167,7 +167,7 @@ func TestAIJob(t *testing.T) {
},
}))
- require.NoError(t, c.aiClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
+ require.NoError(t, c.globalClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
JobID: resp.ID,
Span: &trajectory.Span{
Seq: 1,
@@ -180,7 +180,7 @@ func TestAIJob(t *testing.T) {
},
}))
- require.NoError(t, c.aiClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
+ require.NoError(t, c.globalClient.AITrajectoryLog(&dashapi.AITrajectoryReq{
JobID: resp.ID,
Span: &trajectory.Span{
Seq: 0,
@@ -191,7 +191,7 @@ func TestAIJob(t *testing.T) {
},
}))
- require.NoError(t, c.aiClient.AIJobDone(&dashapi.AIJobDoneReq{
+ require.NoError(t, c.globalClient.AIJobDone(&dashapi.AIJobDoneReq{
ID: resp.ID,
Results: map[string]any{
"Patch": "patch",
@@ -213,7 +213,7 @@ func TestAIAssessmentKCSAN(t *testing.T) {
c.aiClient.ReportCrash(crash)
extID := c.aiClient.pollEmailExtID()
- resp, err := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ resp, err := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: ai.WorkflowAssessmentKCSAN, Name: string(ai.WorkflowAssessmentKCSAN)},
@@ -229,7 +229,7 @@ func TestAIAssessmentKCSAN(t *testing.T) {
_, err = c.GET(fmt.Sprintf("/ai_job?id=%v&correct=%v", resp.ID, aiCorrectnessCorrect))
require.Error(t, err)
- require.NoError(t, c.aiClient.AIJobDone(&dashapi.AIJobDoneReq{
+ require.NoError(t, c.globalClient.AIJobDone(&dashapi.AIJobDoneReq{
ID: resp.ID,
Results: map[string]any{
"Confident": true,
@@ -285,7 +285,7 @@ func TestAIJobsFiltering(t *testing.T) {
c.aiClient.ReportCrash(crash)
c.aiClient.pollEmailBug()
- pollResp, err := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ pollResp, err := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: ai.WorkflowAssessmentKCSAN, Name: string(ai.WorkflowAssessmentKCSAN)},
@@ -322,7 +322,7 @@ func TestAIJobCustomCommit(t *testing.T) {
extID := c.aiClient.pollEmailExtID()
bug, _, _ := c.loadBug(extID)
- _, err := c.aiClient.AIJobPoll(&dashapi.AIJobPollReq{
+ _, err := c.globalClient.AIJobPoll(&dashapi.AIJobPollReq{
CodeRevision: prog.GitRevision,
Workflows: []dashapi.AIWorkflow{
{Type: ai.WorkflowPatching, Name: "patching"},
diff --git a/dashboard/app/api.go b/dashboard/app/api.go
index 109952f73..da5a1b38f 100644
--- a/dashboard/app/api.go
+++ b/dashboard/app/api.go
@@ -46,23 +46,23 @@ func initAPIHandlers() {
}
var apiHandlers = map[string]APIHandler{
- "log_error": typedHandler(apiLogError),
- "job_poll": typedHandler(apiJobPoll),
- "job_reset": typedHandler(apiJobReset),
- "job_done": typedHandler(apiJobDone),
- "reporting_poll_bugs": typedHandler(apiReportingPollBugs),
- "reporting_poll_notifs": typedHandler(apiReportingPollNotifications),
- "reporting_poll_closed": typedHandler(apiReportingPollClosed),
- "reporting_update": typedHandler(apiReportingUpdate),
- "new_test_job": typedHandler(apiNewTestJob),
- "needed_assets": typedHandler(apiNeededAssetsList),
- "load_full_bug": typedHandler(apiLoadFullBug),
- "save_discussion": typedHandler(apiSaveDiscussion),
- "create_upload_url": typedHandler(apiCreateUploadURL),
- "send_email": typedHandler(apiSendEmail),
- "ai_job_poll": typedHandler(apiAIJobPoll),
- "ai_job_done": typedHandler(apiAIJobDone),
- "ai_trajectory_log": typedHandler(apiAITrajectoryLog),
+ "log_error": anyHandler(apiLogError),
+ "job_poll": globalHandler(apiJobPoll),
+ "job_reset": globalHandler(apiJobReset),
+ "job_done": globalHandler(apiJobDone),
+ "reporting_poll_bugs": globalHandler(apiReportingPollBugs),
+ "reporting_poll_notifs": globalHandler(apiReportingPollNotifications),
+ "reporting_poll_closed": globalHandler(apiReportingPollClosed),
+ "reporting_update": globalHandler(apiReportingUpdate),
+ "new_test_job": globalHandler(apiNewTestJob),
+ "needed_assets": globalHandler(apiNeededAssetsList),
+ "load_full_bug": globalHandler(apiLoadFullBug),
+ "save_discussion": globalHandler(apiSaveDiscussion),
+ "create_upload_url": globalHandler(apiCreateUploadURL),
+ "send_email": globalHandler(apiSendEmail),
+ "ai_job_poll": globalHandler(apiAIJobPoll),
+ "ai_job_done": globalHandler(apiAIJobDone),
+ "ai_trajectory_log": globalHandler(apiAITrajectoryLog),
"save_coverage": gcsPayloadHandler(apiSaveCoverage),
"upload_build": nsHandler(apiUploadBuild),
"builder_poll": nsHandler(apiBuilderPoll),
@@ -147,6 +147,11 @@ func handleAPI(ctx context.Context, r *http.Request) (any, error) {
if err != nil {
return nil, fmt.Errorf("checkClient('%s') error: %w", client, err)
}
+ apiContext := &APIContext{
+ client: client,
+ ns: ns,
+ }
+ ctx = context.WithValue(ctx, &apiContextKey, apiContext)
var payloadReader io.Reader
if str := r.PostFormValue("payload"); str != "" {
gr, err := gzip.NewReader(strings.NewReader(str))
@@ -161,21 +166,26 @@ func handleAPI(ctx context.Context, r *http.Request) (any, error) {
if !exists {
return nil, fmt.Errorf("unknown api method %q", method)
}
- reply, err := handler(contextWithNamespace(ctx, ns), payloadReader)
+ reply, err := handler(ctx, payloadReader)
+ if err == nil && !apiContext.nsChecked {
+ err = fmt.Errorf("API handler did not check namespace")
+ }
if err != nil {
- err = fmt.Errorf("method '%s' ns '%s' err: %w", method, ns, err)
+ err = fmt.Errorf("method %q ns %q err: %w", method, ns, err)
}
return reply, err
}
-var contextKeyNamespace = "context namespace available for any APIHandler"
+var apiContextKey = "context available for any APIHandler"
-func contextWithNamespace(ctx context.Context, ns string) context.Context {
- return context.WithValue(ctx, &contextKeyNamespace, ns)
+type APIContext struct {
+ client string
+ ns string
+ nsChecked bool
}
-func contextNamespace(ctx context.Context) string {
- return ctx.Value(&contextKeyNamespace).(string)
+func apiContext(ctx context.Context) *APIContext {
+ return ctx.Value(&apiContextKey).(*APIContext)
}
// gcsPayloadHandler json.Decode the gcsURL from payload and stream pointed content.
@@ -210,14 +220,34 @@ func gcsPayloadHandler(handler APIHandler) APIHandler {
func nsHandler[Req any](handler func(context.Context, string, *Req) (any, error)) APIHandler {
return typedHandler(func(ctx context.Context, req *Req) (any, error) {
- ns := contextNamespace(ctx)
+ ns := apiContext(ctx).ns
if ns == "" {
return nil, fmt.Errorf("must be called within a namespace")
}
+ apiContext(ctx).nsChecked = true
return handler(ctx, ns, req)
})
}
+func globalHandler[Req any](handler func(context.Context, *Req) (any, error)) APIHandler {
+ return typedHandler(func(ctx context.Context, req *Req) (any, error) {
+ ns := apiContext(ctx).ns
+ if ns != "" {
+ return nil, fmt.Errorf("must not be called within a namespace")
+ }
+ apiContext(ctx).nsChecked = true
+ return handler(ctx, req)
+ })
+}
+
+// anyHandler can be used by both global and namespace-specific clients.
+func anyHandler[Req any](handler func(context.Context, *Req) (any, error)) APIHandler {
+ return typedHandler(func(ctx context.Context, req *Req) (any, error) {
+ apiContext(ctx).nsChecked = true
+ return handler(ctx, req)
+ })
+}
+
func typedHandler[Req any](handler func(context.Context, *Req) (any, error)) APIHandler {
return func(ctx context.Context, payload io.Reader) (any, error) {
req := new(Req)
diff --git a/dashboard/app/api_test.go b/dashboard/app/api_test.go
index dae783e19..62f5acd10 100644
--- a/dashboard/app/api_test.go
+++ b/dashboard/app/api_test.go
@@ -7,12 +7,14 @@ import (
"context"
"slices"
"sort"
+ "strings"
"testing"
"time"
"github.com/google/syzkaller/dashboard/dashapi"
"github.com/google/syzkaller/sys/targets"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestClientSecretOK(t *testing.T) {
@@ -72,6 +74,23 @@ func TestClientNamespaceOK(t *testing.T) {
}
}
+func TestClientNamespaceAccess(t *testing.T) {
+ c := NewCtx(t)
+ defer c.Close()
+
+ // A global client must not be able to call per-namespace APIs.
+ globalClient := c.makeClient(reportingClient, reportingKey, false)
+ err := globalClient.UploadBuild(testBuild(1))
+ require.Error(t, err)
+ require.True(t, strings.Contains(err.Error(), "must be called within a namespace"))
+
+ // A namespace client must not be able to call global APIs.
+ nsClient := c.makeClient(client1, password1, false)
+ _, err = nsClient.ReportingPollBugs("test")
+ require.Error(t, err)
+ require.True(t, strings.Contains(err.Error(), "must not be called within a namespace"))
+}
+
func TestEmergentlyStoppedEmail(t *testing.T) {
c := NewCtx(t)
defer c.Close()
@@ -135,7 +154,7 @@ func TestEmergentlyStoppedExternalReport(t *testing.T) {
// There should be no email.
c.advanceTime(time.Hour)
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestEmergentlyStoppedEmailJob(t *testing.T) {
@@ -162,7 +181,7 @@ func TestEmergentlyStoppedEmailJob(t *testing.T) {
c.expectNoEmail()
// Emulate a finished job.
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.advanceTime(time.Hour)
@@ -173,7 +192,7 @@ func TestEmergentlyStoppedEmailJob(t *testing.T) {
CrashLog: []byte("test crash log"),
CrashReport: []byte("test crash report"),
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
// Now we emergently stop syzbot.
c.advanceTime(time.Hour)
@@ -313,7 +332,7 @@ func TestCreateUploadURL(t *testing.T) {
return contextWithConfig(c, &newConfig)
}
- url, err := c.client.CreateUploadURL()
+ url, err := c.globalClient.CreateUploadURL()
assert.NoError(t, err)
assert.Regexp(t, "blobstorage/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.upload", url)
}
diff --git a/dashboard/app/app_test.go b/dashboard/app/app_test.go
index 7953a2986..4c4f12642 100644
--- a/dashboard/app/app_test.go
+++ b/dashboard/app/app_test.go
@@ -63,7 +63,7 @@ var testConfig = &GlobalConfig{
},
},
Clients: map[string]string{
- "reporting": "reportingkeyreportingkeyreportingkey",
+ reportingClient: reportingKey,
},
EmailBlocklist: []string{
"\"Bar\" <Blocked@Domain.com>",
@@ -739,6 +739,8 @@ const (
keyAI = "clientaikeyclientaikeyclientaikey"
clientSkipStage = "client-skip-stage"
keySkipStage = "skipstagekeyskipstagekeyskipstagekey"
+ reportingClient = "reporting"
+ reportingKey = "reportingkeyreportingkeyreportingkey"
restrictedManager = "restricted-manager"
noFixBisectionManager = "no-fix-bisection-manager"
@@ -847,14 +849,14 @@ func TestApp(t *testing.T) {
crash1 := testCrash(build, 1)
c.client.ReportCrash(crash1)
- c.client.pollBug()
+ c.globalClient.pollBug()
// Test that namespace isolation works.
c.expectFail("unknown build", apiClient2.Query("report_crash", crash1, nil))
crash2 := testCrashWithRepro(build, 2)
c.client.ReportCrash(crash2)
- c.client.pollBug()
+ c.globalClient.pollBug()
// Provoke purgeOldCrashes.
const purgeTestIters = 30
@@ -868,7 +870,7 @@ func TestApp(t *testing.T) {
crash.Report = []byte(fmt.Sprintf("report%v", i))
c.client.ReportCrash(crash)
}
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
bug, _, _ := c.loadBug(rep.ID)
c.expectNE(bug, nil)
c.expectEQ(bug.DailyStats, []BugDailyStats{
@@ -882,9 +884,9 @@ func TestApp(t *testing.T) {
}
c.client.ReportFailedRepro(cid)
- c.client.ReportingPollBugs("test")
+ c.globalClient.ReportingPollBugs("test")
- c.client.ReportingUpdate(&dashapi.BugUpdate{
+ c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: "id",
Status: dashapi.BugStatusOpen,
ReproLevel: dashapi.ReproLevelC,
@@ -990,17 +992,17 @@ func TestPurgeOldCrashes(t *testing.T) {
crash := testCrash(build, 1)
crash.ReproOpts = []byte("no repro")
c.client.ReportCrash(crash)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
crash.ReproSyz = []byte("getpid()")
crash.ReproOpts = []byte("syz repro")
c.client.ReportCrash(crash)
- c.client.pollBug()
+ c.globalClient.pollBug()
crash.ReproC = []byte("int main() {}")
crash.ReproOpts = []byte("C repro")
c.client.ReportCrash(crash)
- c.client.pollBug()
+ c.globalClient.pollBug()
// Now report lots of bugs with/without repros. Some of the older ones should be purged.
var totalReported = 3 * maxCrashes()
@@ -1071,7 +1073,7 @@ func TestPurgeOldCrashes(t *testing.T) {
}
// Unreport the first crash.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusUpdate,
ReproLevel: dashapi.ReproLevelC,
diff --git a/dashboard/app/asset_storage_test.go b/dashboard/app/asset_storage_test.go
index 48a344105..3c15879dd 100644
--- a/dashboard/app/asset_storage_test.go
+++ b/dashboard/app/asset_storage_test.go
@@ -118,7 +118,7 @@ If you want to undo deduplication, reply with:
c.checkURLContents(kernelConfigLink, build.KernelConfig)
// We query the needed assets. We need all 3.
- needed, err := c.client2.NeededAssetsList()
+ needed, err := c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(needed.DownloadURLs)
allDownloadURLs := []string{
@@ -129,12 +129,12 @@ If you want to undo deduplication, reply with:
c.expectEQ(needed.DownloadURLs, allDownloadURLs)
// Invalidate the bug.
- c.client.updateBug(extBugID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(extBugID, dashapi.BugStatusInvalid, "")
_, err = c.GET("/cron/deprecate_assets")
c.expectOK(err)
// Query the needed assets once more, so far there should be no change.
- needed, err = c.client2.NeededAssetsList()
+ needed, err = c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(needed.DownloadURLs)
c.expectEQ(needed.DownloadURLs, allDownloadURLs)
@@ -145,7 +145,7 @@ If you want to undo deduplication, reply with:
c.expectOK(err)
// Only the html asset should have persisted.
- needed, err = c.client2.NeededAssetsList()
+ needed, err = c.globalClient.NeededAssetsList()
c.expectOK(err)
c.expectEQ(needed.DownloadURLs, []string{"http://google.com/coverage.html"})
}
@@ -209,7 +209,7 @@ func TestCoverReportDeprecation(t *testing.T) {
ensureNeeded := func(needed []string) {
_, err := c.GET("/cron/deprecate_assets")
c.expectOK(err)
- neededResp, err := c.client.NeededAssetsList()
+ neededResp, err := c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(neededResp.DownloadURLs)
sort.Strings(needed)
@@ -285,7 +285,7 @@ func TestFreshBuildAssets(t *testing.T) {
ensureNeeded := func(needed []string) {
_, err := c.GET("/cron/deprecate_assets")
c.expectOK(err)
- neededResp, err := c.client.NeededAssetsList()
+ neededResp, err := c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(neededResp.DownloadURLs)
sort.Strings(needed)
@@ -427,7 +427,7 @@ If you want to undo deduplication, reply with:
c.checkURLContents(kernelConfigLink, build.KernelConfig)
// We query the needed assets. We need all 3.
- needed, err := c.client2.NeededAssetsList()
+ needed, err := c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(needed.DownloadURLs)
allDownloadURLs := []string{
@@ -438,12 +438,12 @@ If you want to undo deduplication, reply with:
c.expectEQ(needed.DownloadURLs, allDownloadURLs)
// Invalidate the bug.
- c.client.updateBug(extBugID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(extBugID, dashapi.BugStatusInvalid, "")
_, err = c.GET("/cron/deprecate_assets")
c.expectOK(err)
// Query the needed assets once more, so far there should be no change.
- needed, err = c.client2.NeededAssetsList()
+ needed, err = c.globalClient.NeededAssetsList()
c.expectOK(err)
sort.Strings(needed.DownloadURLs)
c.expectEQ(needed.DownloadURLs, allDownloadURLs)
@@ -454,7 +454,7 @@ If you want to undo deduplication, reply with:
c.expectOK(err)
// Nothing should have been persisted.
- needed, err = c.client2.NeededAssetsList()
+ needed, err = c.globalClient.NeededAssetsList()
c.expectOK(err)
c.expectEQ(needed.DownloadURLs, []string{})
}
diff --git a/dashboard/app/bisect_test.go b/dashboard/app/bisect_test.go
index 9ef472c7e..2612f8c22 100644
--- a/dashboard/app/bisect_test.go
+++ b/dashboard/app/bisect_test.go
@@ -29,7 +29,7 @@ func TestBisectCause(t *testing.T) {
c.client2.pollEmailBug()
// No repro - no bisection.
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
// Now upload 4 crashes with repros.
@@ -69,7 +69,7 @@ func TestBisectCause(t *testing.T) {
// BisectFix #4
// BisectCause #3
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobBisectCause)
c.expectEQ(pollResp.Manager, build.Manager)
@@ -88,13 +88,13 @@ func TestBisectCause(t *testing.T) {
Log: []byte("bisect log 3"),
Error: []byte("bisect error 3"),
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
c.expectNoEmail()
// BisectCause #2
pollResp2 := pollResp
c.advanceTime(time.Minute)
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, pollResp2.ID)
c.expectEQ(pollResp.ReproOpts, []byte("repro opts 2"))
@@ -125,7 +125,7 @@ func TestBisectCause(t *testing.T) {
},
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
_, extBugID, err := email.RemoveAddrContext(msg2.Sender)
c.expectOK(err)
@@ -267,7 +267,7 @@ If you want to undo deduplication, reply with:
"default2@maintainers.com",
})
c.advanceTime(time.Minute)
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
// Bisection succeeded.
jobID = pollResp.ID
@@ -296,7 +296,7 @@ If you want to undo deduplication, reply with:
},
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
{
msg := c.pollEmailBug()
@@ -327,7 +327,7 @@ If you want to undo deduplication, reply with:
// BisectFix #2
c.advanceTime(time.Minute)
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobBisectFix)
c.expectEQ(pollResp.ReproOpts, []byte("repro opts 2"))
@@ -337,11 +337,11 @@ If you want to undo deduplication, reply with:
Log: []byte("bisect log 2"),
Error: []byte("bisect error 2"),
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
// BisectFix #3
c.advanceTime(time.Minute)
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobBisectFix)
c.expectEQ(pollResp.ReproOpts, []byte("repro opts 3"))
@@ -350,11 +350,11 @@ If you want to undo deduplication, reply with:
Log: []byte("bisect log 3"),
Error: []byte("bisect error 3"),
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
// BisectFix #4
c.advanceTime(time.Minute)
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobBisectFix)
c.expectEQ(pollResp.ReproOpts, []byte("repro opts 4"))
@@ -384,7 +384,7 @@ If you want to undo deduplication, reply with:
},
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
_, extBugID, err = email.RemoveAddrContext(msg4.Sender)
c.expectOK(err)
@@ -437,7 +437,7 @@ For information about bisection process see: https://goo.gl/tpsmEJ#bisection
}
// No more bisection jobs.
- pollResp = c.client2.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
}
@@ -451,7 +451,7 @@ func TestBisectCauseInconclusive(t *testing.T) {
c.client2.ReportCrash(crash)
msg := c.client2.pollEmailBug()
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
ID: jobID,
@@ -477,7 +477,7 @@ func TestBisectCauseInconclusive(t *testing.T) {
},
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
_, extBugID, err := email.RemoveAddrContext(msg.Sender)
c.expectOK(err)
@@ -588,7 +588,7 @@ func TestUnreliableBisect(t *testing.T) {
c.client2.ReportCrash(crash)
_ = c.client2.pollEmailBug()
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
ID: jobID,
@@ -607,7 +607,7 @@ func TestUnreliableBisect(t *testing.T) {
},
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
// The bisection result is unreliable - it shouldn't be reported.
c.expectNoEmail()
@@ -660,7 +660,7 @@ func TestBisectWrong(t *testing.T) {
c.client2.pollEmailBug()
{
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
done := &dashapi.JobDoneReq{
ID: pollResp.ID,
Flags: flags,
@@ -677,7 +677,7 @@ func TestBisectWrong(t *testing.T) {
},
}
done.Build.ID = pollResp.ID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
if i == 0 {
msg := c.pollEmailBug()
c.expectTrue(strings.Contains(msg.Body, "syzbot has bisected this issue to:"))
@@ -687,7 +687,7 @@ func TestBisectWrong(t *testing.T) {
}
{
c.advanceTime(31 * 24 * time.Hour)
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
done := &dashapi.JobDoneReq{
ID: pollResp.ID,
Flags: flags,
@@ -707,7 +707,7 @@ func TestBisectWrong(t *testing.T) {
},
}
done.Build.ID = pollResp.ID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
if i == 0 {
msg := c.pollEmailBug()
c.expectTrue(strings.Contains(msg.Body, "syzbot suspects this issue was fixed by commit:"))
@@ -749,7 +749,7 @@ func TestBisectCauseAncient(t *testing.T) {
c.client2.ReportCrash(crash)
msg := c.client2.pollEmailBug()
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
ID: jobID,
@@ -760,7 +760,7 @@ func TestBisectCauseAncient(t *testing.T) {
CrashReport: []byte("bisect crash report"),
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
_, extBugID, err := email.RemoveAddrContext(msg.Sender)
c.expectOK(err)
@@ -869,9 +869,9 @@ func TestBisectCauseExternal(t *testing.T) {
c.client.UploadBuild(build)
crash := testCrashWithRepro(build, 1)
c.client.ReportCrash(crash)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
- pollResp := c.client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
@@ -890,14 +890,14 @@ func TestBisectCauseExternal(t *testing.T) {
},
}
done.Build.ID = jobID
- c.expectOK(c.client.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
- resp, _ := c.client.ReportingPollBugs("test")
+ resp, _ := c.globalClient.ReportingPollBugs("test")
c.expectEQ(len(resp.Reports), 1)
// Still reported because we did not ack.
- bisect := c.client.pollBug()
+ bisect := c.globalClient.pollBug()
// pollBug acks, must not be reported after that.
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
c.expectEQ(bisect.Type, dashapi.ReportBisectCause)
c.expectEQ(bisect.Title, rep.Title)
@@ -911,21 +911,21 @@ func TestBisectFixExternal(t *testing.T) {
c.client.UploadBuild(build)
crash := testCrashWithRepro(build, 1)
c.client.ReportCrash(crash)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
{
// Cause bisection fails.
- pollResp := c.client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
done := &dashapi.JobDoneReq{
ID: pollResp.ID,
Log: []byte("bisect log"),
Error: []byte("bisect error"),
}
- c.expectOK(c.client.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
}
c.advanceTime(31 * 24 * time.Hour)
{
// Fix bisection succeeds.
- pollResp := c.client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
done := &dashapi.JobDoneReq{
ID: pollResp.ID,
Build: *build,
@@ -944,8 +944,8 @@ func TestBisectFixExternal(t *testing.T) {
},
}
done.Build.ID = pollResp.ID
- c.expectOK(c.client.JobDone(done))
- rep := c.client.pollBug()
+ c.expectOK(c.globalClient.JobDone(done))
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Type, dashapi.ReportBisectFix)
}
{
@@ -967,7 +967,7 @@ func TestBisectCauseReproSyz(t *testing.T) {
crash.ReproC = nil
c.client2.ReportCrash(crash)
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
ID: jobID,
@@ -977,7 +977,7 @@ func TestBisectCauseReproSyz(t *testing.T) {
CrashLog: []byte("bisect crash log"),
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
crash.ReproC = []byte("int main")
c.client2.ReportCrash(crash)
@@ -1001,7 +1001,7 @@ func TestBisectCauseReproSyz2(t *testing.T) {
crash.ReproC = nil
c.client2.ReportCrash(crash)
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
ID: jobID,
@@ -1011,7 +1011,7 @@ func TestBisectCauseReproSyz2(t *testing.T) {
CrashLog: []byte("bisect crash log"),
}
done.Build.ID = jobID
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
msg := c.client2.pollEmailBug()
if !strings.Contains(msg.Body, "syzbot found the following issue") {
@@ -1121,7 +1121,7 @@ func TestBugBisectionInvalidation(t *testing.T) {
// Wait 30 days, no new cause bisection jobs should be created.
c.advanceTime(24 * 30 * time.Hour)
- resp := c.client2.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
BisectCause: true,
})
c.expectEQ(resp.ID, "")
@@ -1135,7 +1135,7 @@ func TestBugBisectionInvalidation(t *testing.T) {
// The bisection should be started again.
c.advanceTime(time.Hour)
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
}
@@ -1159,7 +1159,7 @@ func addBuildAndCrash(c *Ctx) (*dashapi.Build, *dashapi.Crash) {
// Poll a JobBisectCause and send cause information.
func addBisectCauseJob(c *Ctx, build *dashapi.Build) (*dashapi.JobPollResp, *dashapi.JobDoneReq, string) {
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
jobID := resp.ID
@@ -1187,7 +1187,7 @@ func addBisectCauseJob(c *Ctx, build *dashapi.Build) (*dashapi.JobPollResp, *das
},
},
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
c.advanceTime(24 * time.Hour)
msg := c.client2.pollEmailBug()
@@ -1198,7 +1198,7 @@ func addBisectCauseJob(c *Ctx, build *dashapi.Build) (*dashapi.JobPollResp, *das
// Poll a JobBisectfix and send fix information.
func addBisectFixJob(c *Ctx, build *dashapi.Build) (*dashapi.JobPollResp, *dashapi.JobDoneReq, string) {
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
jobID := resp.ID
@@ -1226,7 +1226,7 @@ func addBisectFixJob(c *Ctx, build *dashapi.Build) (*dashapi.JobPollResp, *dasha
},
},
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
msg := c.client2.pollEmailBug()
c.expectTrue(strings.Contains(msg.Body, "syzbot suspects this issue was fixed by commit:"))
diff --git a/dashboard/app/cache_test.go b/dashboard/app/cache_test.go
index db4e1a809..044d75e97 100644
--- a/dashboard/app/cache_test.go
+++ b/dashboard/app/cache_test.go
@@ -22,14 +22,14 @@ func TestCachedBugGroups(t *testing.T) {
crash := testCrash(build, 1)
crash.Title = "user-visible bug"
client.ReportCrash(crash)
- client.pollBug()
+ c.globalClient.pollBug()
// Bug at the second (AccessPublic) stage.
crash2 := testCrash(build, 2)
crash2.Title = "public-visible bug"
client.ReportCrash(crash2)
- client.updateBug(client.pollBug().ID, dashapi.BugStatusUpstream, "")
- client.pollBug()
+ c.globalClient.updateBug(c.globalClient.pollBug().ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.pollBug()
// Add a build in a separate namespace (to check it's not mixed in).
client2 := c.makeClient(clientPublicEmail2, keyPublicEmail2, true)
diff --git a/dashboard/app/commit_poll_test.go b/dashboard/app/commit_poll_test.go
index 5f80db826..e0f6fa92f 100644
--- a/dashboard/app/commit_poll_test.go
+++ b/dashboard/app/commit_poll_test.go
@@ -19,11 +19,11 @@ func TestCommitPoll(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
crash2 := testCrash(build1, 2)
c.client.ReportCrash(crash2)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
// No commits in commit poll.
commitPollResp, err := c.client.CommitPoll()
@@ -36,7 +36,7 @@ func TestCommitPoll(t *testing.T) {
c.expectEQ(len(commitPollResp.Commits), 0)
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep1.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix1", "foo: fix2"},
diff --git a/dashboard/app/discussion_test.go b/dashboard/app/discussion_test.go
index bbe87341b..993b38add 100644
--- a/dashboard/app/discussion_test.go
+++ b/dashboard/app/discussion_test.go
@@ -26,19 +26,19 @@ func TestDiscussionAccess(t *testing.T) {
// Bug at the first (AccesUser) stage of reporting.
crash := testCrash(build, 1)
client.ReportCrash(crash)
- rep1 := client.pollBug()
+ rep1 := c.globalClient.pollBug()
// Bug at the second (AccessPublic) stage.
crash2 := testCrash(build, 2)
client.ReportCrash(crash2)
- rep2user := client.pollBug()
- client.updateBug(rep2user.ID, dashapi.BugStatusUpstream, "")
- rep2 := client.pollBug()
+ rep2user := c.globalClient.pollBug()
+ c.globalClient.updateBug(rep2user.ID, dashapi.BugStatusUpstream, "")
+ rep2 := c.globalClient.pollBug()
// Patch to both bugs.
firstTime := timeNow(c.ctx)
c.advanceTime(time.Hour)
- c.expectOK(client.SaveDiscussion(&dashapi.SaveDiscussionReq{
+ c.expectOK(c.globalClient.SaveDiscussion(&dashapi.SaveDiscussionReq{
Discussion: &dashapi.Discussion{
ID: "123",
Source: dashapi.DiscussionLore,
@@ -58,7 +58,7 @@ func TestDiscussionAccess(t *testing.T) {
// Discussion about the second bug.
secondTime := timeNow(c.ctx)
c.advanceTime(time.Hour)
- c.expectOK(client.SaveDiscussion(&dashapi.SaveDiscussionReq{
+ c.expectOK(c.globalClient.SaveDiscussion(&dashapi.SaveDiscussionReq{
Discussion: &dashapi.Discussion{
ID: "456",
Source: dashapi.DiscussionLore,
diff --git a/dashboard/app/email_test.go b/dashboard/app/email_test.go
index 158ef09f8..70117bff8 100644
--- a/dashboard/app/email_test.go
+++ b/dashboard/app/email_test.go
@@ -1141,7 +1141,7 @@ func TestEmailPatchTestingAccess(t *testing.T) {
c.expectNoEmail()
// The patch test job should also not be created.
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
}
diff --git a/dashboard/app/fix_test.go b/dashboard/app/fix_test.go
index 9505177e3..8e67bd687 100644
--- a/dashboard/app/fix_test.go
+++ b/dashboard/app/fix_test.go
@@ -28,10 +28,10 @@ func TestFixBasic(t *testing.T) {
needRepro, _ := c.client.NeedRepro(testCrashID(crash1))
c.expectEQ(needRepro, true)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix the crash"},
@@ -48,19 +48,19 @@ func TestFixBasic(t *testing.T) {
c.expectEQ(builderPollResp.PendingCommits[0], "foo: fix the crash")
// Patches must not be reset on other actions.
- c.client.updateBug(rep.ID, dashapi.BugStatusOpen, "")
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusOpen, "")
// Upstream commands must fail if patches are already present.
// Right course of action is unclear in this situation,
// so this test merely documents the current behavior.
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusUpstream,
})
c.expectEQ(reply.OK, false)
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Upload another build with the commit present.
build2 := testBuild(2)
@@ -74,13 +74,13 @@ func TestFixBasic(t *testing.T) {
// Ensure that a new crash creates a new bug (the old one must be marked as fixed).
c.client.ReportCrash(crash1)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, "title1 (2)")
// Regression test: previously upstreamming failed because the new bug had fixing commits.
c.client.ReportCrash(crash1)
- c.client.updateBug(rep2.ID, dashapi.BugStatusUpstream, "")
- c.client.pollBug()
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.pollBug()
}
// Test bug that is fixed by 2 commits.
@@ -97,10 +97,10 @@ func TestFixedByTwoCommits(t *testing.T) {
builderPollResp, _ := c.client.BuilderPoll(build1.Manager)
c.expectEQ(len(builderPollResp.PendingCommits), 0)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"bar: prepare for fixing", "\"foo: fix the crash\""},
@@ -126,7 +126,7 @@ func TestFixedByTwoCommits(t *testing.T) {
c.expectEQ(builderPollResp.PendingCommits[1], "foo: fix the crash")
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now upload build with both commits.
build3 := testBuild(3)
@@ -140,7 +140,7 @@ func TestFixedByTwoCommits(t *testing.T) {
// Ensure that a new crash creates a new bug (the old one must be marked as fixed).
c.client.ReportCrash(crash1)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, "title1 (2)")
}
@@ -159,7 +159,7 @@ func TestReFixed(t *testing.T) {
c.expectEQ(len(builderPollResp.PendingCommits), 0)
c.advanceTime(time.Hour)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
bug, _, _ := c.loadBug(rep.ID)
c.expectEQ(bug.LastActivity, c.mockedTime)
@@ -167,7 +167,7 @@ func TestReFixed(t *testing.T) {
// Specify fixing commit for the bug.
c.advanceTime(time.Hour)
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"a wrong one"},
@@ -179,7 +179,7 @@ func TestReFixed(t *testing.T) {
c.expectEQ(bug.FixTime, c.mockedTime)
c.advanceTime(time.Hour)
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"the right one"},
@@ -193,7 +193,7 @@ func TestReFixed(t *testing.T) {
// No updates, just check that LastActivity time is updated, FixTime preserved.
fixTime := c.mockedTime
c.advanceTime(time.Hour)
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
})
@@ -204,7 +204,7 @@ func TestReFixed(t *testing.T) {
// Send the same fixing commit, check that LastActivity time is updated, FixTime preserved.
c.advanceTime(time.Hour)
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"the right one"},
@@ -230,7 +230,7 @@ func TestReFixed(t *testing.T) {
c.expectEQ(builderPollResp.PendingCommits[0], "the right one")
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now upload build with the right commit.
build3 := testBuild(3)
@@ -257,10 +257,10 @@ func TestFixTwoManagers(t *testing.T) {
builderPollResp, _ := c.client.BuilderPoll(build1.Manager)
c.expectEQ(len(builderPollResp.PendingCommits), 0)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix the crash"},
@@ -297,7 +297,7 @@ func TestFixTwoManagers(t *testing.T) {
// Check that the bug is still open.
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now the second manager picks up the commit.
build4 := testBuild(4)
@@ -310,7 +310,7 @@ func TestFixTwoManagers(t *testing.T) {
c.expectEQ(len(builderPollResp.PendingCommits), 0)
c.client.ReportCrash(crash1)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, "title1 (2)")
}
@@ -327,10 +327,10 @@ func TestReFixedTwoManagers(t *testing.T) {
builderPollResp, _ := c.client.BuilderPoll(build1.Manager)
c.expectEQ(len(builderPollResp.PendingCommits), 0)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix the crash"},
@@ -351,7 +351,7 @@ func TestReFixedTwoManagers(t *testing.T) {
c.expectEQ(len(builderPollResp.PendingCommits), 0)
// Now we change the fixing commit.
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"the right one"},
@@ -375,10 +375,10 @@ func TestReFixedTwoManagers(t *testing.T) {
// The bug must be still open.
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Specify fixing commit again, but it's the same one as before, so nothing changed.
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"the right one"},
@@ -396,7 +396,7 @@ func TestReFixedTwoManagers(t *testing.T) {
c.expectEQ(len(builderPollResp.PendingCommits), 0)
c.client.ReportCrash(crash1)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, "title1 (2)")
}
@@ -414,7 +414,7 @@ func TestFixedWithCommitTags(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Upload build with 2 fixing commits for this bug.
build1.FixCommits = []dashapi.Commit{
@@ -436,7 +436,7 @@ func TestFixedWithCommitTags(t *testing.T) {
// The bug is still not fixed.
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now the second manager reports the same commits.
// This must close the bug.
@@ -449,7 +449,7 @@ func TestFixedWithCommitTags(t *testing.T) {
// Ensure that a new crash creates a new bug.
c.client.ReportCrash(crash1)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, "title1 (2)")
}
@@ -464,14 +464,14 @@ func TestFixedDup(t *testing.T) {
crash1 := testCrash(build, 1)
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
crash2 := testCrash(build, 2)
c.client.ReportCrash(crash2)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
// rep2 is a dup of rep1.
- c.client.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
// Upload build that fixes rep2.
build.FixCommits = []dashapi.Commit{
@@ -481,7 +481,7 @@ func TestFixedDup(t *testing.T) {
// This must fix rep1.
c.client.ReportCrash(crash1)
- rep3 := c.client.pollBug()
+ rep3 := c.globalClient.pollBug()
c.expectEQ(rep3.Title, rep1.Title+" (2)")
}
@@ -499,14 +499,14 @@ func TestFixedDup2(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
crash2 := testCrash(build1, 2)
c.client.ReportCrash(crash2)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
// rep2 is a dup of rep1.
- c.client.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
// Upload build that fixes rep2.
build1.FixCommits = []dashapi.Commit{
@@ -515,20 +515,20 @@ func TestFixedDup2(t *testing.T) {
c.client.UploadBuild(build1)
// Now undup the bugs. They are still unfixed as only 1 manager uploaded the commit.
- c.client.updateBug(rep2.ID, dashapi.BugStatusOpen, "")
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusOpen, "")
// Now the second manager reports the same commits. This must close both bugs.
build2.FixCommits = build1.FixCommits
c.client.UploadBuild(build2)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
c.advanceTime(24 * time.Hour)
c.client.ReportCrash(crash1)
- rep3 := c.client.pollBug()
+ rep3 := c.globalClient.pollBug()
c.expectEQ(rep3.Title, rep1.Title+" (2)")
c.client.ReportCrash(crash2)
- rep4 := c.client.pollBug()
+ rep4 := c.globalClient.pollBug()
c.expectEQ(rep4.Title, rep2.Title+" (2)")
}
@@ -545,14 +545,14 @@ func TestFixedDup3(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
crash2 := testCrash(build1, 2)
c.client.ReportCrash(crash2)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
// rep2 is a dup of rep1.
- c.client.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
// Upload builds that fix rep1 and rep2 with different commits.
// This must fix rep1 eventually and we must not livelock in such scenario.
@@ -567,6 +567,6 @@ func TestFixedDup3(t *testing.T) {
c.client.UploadBuild(build2)
c.client.ReportCrash(crash1)
- rep3 := c.client.pollBug()
+ rep3 := c.globalClient.pollBug()
c.expectEQ(rep3.Title, rep1.Title+" (2)")
}
diff --git a/dashboard/app/jobs_test.go b/dashboard/app/jobs_test.go
index 6b9941e6b..cdd646465 100644
--- a/dashboard/app/jobs_test.go
+++ b/dashboard/app/jobs_test.go
@@ -58,7 +58,7 @@ func TestJob(t *testing.T) {
crash.ReproSyz = []byte("repro syz")
crash.ReproC = []byte("repro C")
client.ReportCrash(crash)
- client.pollAndFailBisectJob(build.Manager)
+ c.globalClient.pollAndFailBisectJob(build.Manager)
body = c.pollEmailBug().Body
c.expectEQ(strings.Contains(body, "syzbot has found a reproducer"), true)
@@ -84,7 +84,7 @@ func TestJob(t *testing.T) {
c.incomingEmail(sender, syzTestGitBranchSamplePatch,
EmailOptFrom("\"foo\" <blOcKed@dOmain.COM>"))
c.expectNoEmail()
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
// This submits actual test request.
@@ -99,9 +99,9 @@ func TestJob(t *testing.T) {
EmailOptCC([]string{"somebody@else.com", "test@syzkaller.com"}))
c.expectNoEmail()
- pollResp = client.pollJobs("foobar")
+ pollResp = c.globalClient.pollJobs("foobar")
c.expectEQ(pollResp.ID, "")
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.Manager, build.Manager)
@@ -124,7 +124,7 @@ func TestJob(t *testing.T) {
CrashLog: []byte("test crash log"),
CrashReport: []byte("test crash report"),
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
{
dbJob, dbBuild, _ := c.loadJob(pollResp.ID)
@@ -161,14 +161,14 @@ patch: %[1]v
// Testing fails with an error.
c.incomingEmail(sender, syzTestGitBranchSamplePatch, EmailOptMessageID(2))
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
jobDoneReq = &dashapi.JobDoneReq{
ID: pollResp.ID,
Build: *build,
Error: []byte("failed to apply patch"),
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
{
dbJob, dbBuild, _ := c.loadJob(pollResp.ID)
patchLink := externalLink(c.ctx, textPatch, dbJob.Patch)
@@ -198,14 +198,14 @@ patch: %[1]v
// Testing fails with a huge error that can't be inlined in email.
c.incomingEmail(sender, syzTestGitBranchSamplePatch, EmailOptMessageID(3))
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
jobDoneReq = &dashapi.JobDoneReq{
ID: pollResp.ID,
Build: *build,
Error: bytes.Repeat([]byte{'a', 'b', 'c'}, (maxInlineError+100)/3),
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
{
dbJob, dbBuild, _ := c.loadJob(pollResp.ID)
patchLink := externalLink(c.ctx, textPatch, dbJob.Patch)
@@ -240,14 +240,14 @@ patch: %[3]v
}
c.incomingEmail(sender, syzTestGitBranchSamplePatch, EmailOptMessageID(4))
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
jobDoneReq = &dashapi.JobDoneReq{
ID: pollResp.ID,
Build: *build,
CrashLog: []byte("console output"),
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
{
dbJob, dbBuild, _ := c.loadJob(pollResp.ID)
patchLink := externalLink(c.ctx, textPatch, dbJob.Patch)
@@ -278,7 +278,7 @@ Note: testing is done by a robot and is best-effort only.
c.checkURLContents(kernelConfigLink, build.KernelConfig)
}
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
}
@@ -301,7 +301,7 @@ func TestBootErrorPatch(t *testing.T) {
c.incomingEmail(report.Sender, syzTestGitBranchSamplePatch,
EmailOptFrom("test@requester.com"), EmailOptCC(report.To))
c.expectNoEmail()
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
}
@@ -325,7 +325,7 @@ func TestTestErrorPatch(t *testing.T) {
c.incomingEmail(report.Sender, syzTestGitBranchSamplePatch,
EmailOptFrom("test@requester.com"), EmailOptCC(report.To))
c.expectNoEmail()
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
}
@@ -343,7 +343,7 @@ func TestJobWithoutPatch(t *testing.T) {
crash.ReproOpts = []byte("repro opts")
crash.ReproSyz = []byte("repro syz")
client.ReportCrash(crash)
- client.pollAndFailBisectJob(build.Manager)
+ c.globalClient.pollAndFailBisectJob(build.Manager)
sender := c.pollEmailBug().Sender
_, extBugID, err := email.RemoveAddrContext(sender)
c.expectOK(err)
@@ -353,7 +353,7 @@ func TestJobWithoutPatch(t *testing.T) {
c.incomingEmail(sender, "#syz test git://mygit.com/git.git 5e6a2eea\n", EmailOptMessageID(1))
c.expectNoEmail()
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
testBuild := testBuild(2)
@@ -364,7 +364,7 @@ func TestJobWithoutPatch(t *testing.T) {
ID: pollResp.ID,
Build: *testBuild,
}
- client.JobDone(jobDoneReq)
+ c.globalClient.JobDone(jobDoneReq)
{
_, dbBuild, _ := c.loadJob(pollResp.ID)
kernelConfigLink := externalLink(c.ctx, textKernelConfig, dbBuild.KernelConfig)
@@ -391,7 +391,7 @@ Note: testing is done by a robot and is best-effort only.
c.checkURLContents(kernelConfigLink, testBuild.KernelConfig)
}
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
}
@@ -436,7 +436,7 @@ func TestReproRetestJob(t *testing.T) {
// Let's say that the C repro testing has failed.
c.advanceTime(c.config().Obsoleting.ReproRetestStart + time.Hour)
for i := 0; i < 2; i++ {
- resp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
c.expectEQ(resp.Type, dashapi.JobTestPatch)
c.expectEQ(resp.KernelRepo, build.KernelRepo)
c.expectEQ(resp.KernelBranch, build.KernelBranch)
@@ -457,7 +457,7 @@ func TestReproRetestJob(t *testing.T) {
ID: resp.ID,
}
}
- client.expectOK(client.JobDone(done))
+ client.expectOK(c.globalClient.JobDone(done))
}
// Expect that the repro level is no longer ReproLevelC.
c.expectNoEmail()
@@ -466,7 +466,7 @@ func TestReproRetestJob(t *testing.T) {
// Let's also deprecate the syz repro.
c.advanceTime(c.config().Obsoleting.ReproRetestPeriod + time.Hour)
- resp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
c.expectEQ(resp.Type, dashapi.JobTestPatch)
c.expectEQ(resp.KernelBranch, build.KernelBranch)
c.expectEQ(resp.ReproC, []uint8(nil))
@@ -474,7 +474,7 @@ func TestReproRetestJob(t *testing.T) {
done := &dashapi.JobDoneReq{
ID: resp.ID,
}
- client.expectOK(client.JobDone(done))
+ client.expectOK(c.globalClient.JobDone(done))
// Expect that the repro level is no longer ReproLevelC.
bug, _, _ = c.loadBug(extBugID)
c.expectEQ(bug.HeadReproLevel, ReproLevelNone)
@@ -533,7 +533,7 @@ func TestDelegatedManagerReproRetest(t *testing.T) {
// Let's say that the C repro testing has failed.
c.advanceTime(c.config().Obsoleting.ReproRetestPeriod + time.Hour)
- resp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
c.expectEQ(resp.Type, dashapi.JobTestPatch)
c.expectEQ(resp.KernelRepo, build.KernelRepo)
c.expectEQ(resp.KernelBranch, build.KernelBranch)
@@ -545,7 +545,7 @@ func TestDelegatedManagerReproRetest(t *testing.T) {
ID: resp.ID,
}
- client.expectOK(client.JobDone(done))
+ client.expectOK(c.globalClient.JobDone(done))
// If it has worked, the repro is revoked and the bug is obsoleted.
c.pollEmailBug()
@@ -567,19 +567,19 @@ func TestJobRestrictedManager(t *testing.T) {
crash := testCrash(build, 1)
crash.ReproSyz = []byte("repro syz")
client.ReportCrash(crash)
- client.pollAndFailBisectJob(build.Manager)
+ c.globalClient.pollAndFailBisectJob(build.Manager)
sender := c.pollEmailBug().Sender
// Testing on a wrong repo must fail and no test jobs passed to manager.
c.incomingEmail(sender, "#syz test: git://mygit.com/git.git master\n", EmailOptMessageID(1))
reply := c.pollEmailBug()
c.expectEQ(strings.Contains(reply.Body, "you should test only on restricted.git"), true)
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.ID, "")
// Testing on the right repo must succeed.
c.incomingEmail(sender, "#syz test: git://restricted.git/restricted.git master\n", EmailOptMessageID(2))
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.Manager, build.Manager)
@@ -599,17 +599,17 @@ func TestBisectFixJob(t *testing.T) {
c.client2.pollEmailBug()
// Receive the JobBisectCause.
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixJob:JobBisectCause"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Ensure no more jobs.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectEQ(resp.ID, "")
// Advance time by 30 days and read out any notification emails.
@@ -625,14 +625,14 @@ func TestBisectFixJob(t *testing.T) {
}
// Ensure that we get a JobBisectFix.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
done = &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixJob:JobBisectFix"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
}
// Test that JobBisectFix jobs are re-tried if crash occurs on ToT.
@@ -648,14 +648,14 @@ func TestBisectFixRetry(t *testing.T) {
c.client2.pollEmailBug()
// Receive the JobBisectCause.
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectCause"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Advance time by 30 days and read out any notification emails.
{
@@ -670,7 +670,7 @@ func TestBisectFixRetry(t *testing.T) {
}
// Ensure that we get a JobBisectFix. We send back a crashlog, no error, no commits.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
done = &dashapi.JobDoneReq{
@@ -681,7 +681,7 @@ func TestBisectFixRetry(t *testing.T) {
CrashLog: []byte("this is a crashlog"),
CrashReport: []byte("this is a crashreport"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Advance time by 30 days. No notification emails.
{
@@ -689,14 +689,14 @@ func TestBisectFixRetry(t *testing.T) {
}
// Ensure that we get a JobBisectFix retry.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
done = &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectFix"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
}
// Test that bisection results are not reported for bugs that are already marked as fixed.
@@ -712,14 +712,14 @@ func TestNotReportingAlreadyFixed(t *testing.T) {
c.client2.pollEmailBug()
// Receive the JobBisectCause.
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectCause"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
sender := ""
// Advance time by 30 days and read out any notification emails.
@@ -736,7 +736,7 @@ func TestNotReportingAlreadyFixed(t *testing.T) {
}
// Poll for a BisectFix job.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
@@ -774,7 +774,7 @@ func TestNotReportingAlreadyFixed(t *testing.T) {
},
},
}
- c.expectOK(c.client2.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
// No reporting should come in at this point. If there is reporting, c.Close()
// will fail.
@@ -794,14 +794,14 @@ func TestFixBisectionsListed(t *testing.T) {
c.client2.pollEmailBug()
// Receive the JobBisectCause.
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectCause"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// At this point, no fix bisections should be listed out.
var bugs []*Bug
@@ -827,7 +827,7 @@ func TestFixBisectionsListed(t *testing.T) {
// Ensure that we get a JobBisectFix. We send back a crashlog, no error,
// no commits.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
done = &dashapi.JobDoneReq{
@@ -840,7 +840,7 @@ func TestFixBisectionsListed(t *testing.T) {
CrashReport: []byte("this is a crashreport"),
Log: []byte("this is a log"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Check the bug page and ensure that a bisection is listed out.
content, err = c.GET(url)
@@ -853,14 +853,14 @@ func TestFixBisectionsListed(t *testing.T) {
}
// Ensure that we get a JobBisectFix retry.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectFix)
done = &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectFix"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Check the bug page and ensure that no bisections are listed out.
content, err = c.GET(url)
@@ -882,14 +882,14 @@ func TestFixBisectionsDisabled(t *testing.T) {
c.client2.pollEmailBug()
// Receive the JobBisectCause.
- resp := c.client2.pollJobs(build.Manager)
+ resp := c.globalClient.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectCause"),
}
- c.client2.expectOK(c.client2.JobDone(done))
+ c.client2.expectOK(c.globalClient.JobDone(done))
// Advance time by 30 days and read out any notification emails.
{
@@ -904,7 +904,7 @@ func TestFixBisectionsDisabled(t *testing.T) {
}
// Ensure that we do not get a JobBisectFix.
- resp = c.client2.pollJobs(build.Manager)
+ resp = c.globalClient.pollJobs(build.Manager)
c.client2.expectEQ(resp.ID, "")
}
@@ -922,12 +922,12 @@ func TestExternalPatchFlow(t *testing.T) {
client.ReportCrash(crash)
// Confirm the report.
- reports, err := client.ReportingPollBugs("test")
+ reports, err := c.globalClient.ReportingPollBugs("test")
origReport := reports.Reports[0]
c.expectOK(err)
c.expectEQ(len(reports.Reports), 1)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
Status: dashapi.BugStatusOpen,
})
@@ -935,7 +935,7 @@ func TestExternalPatchFlow(t *testing.T) {
client.expectEQ(reply.OK, true)
// Create a new patch testing job.
- ret, err := client.NewTestJob(&dashapi.TestPatchRequest{
+ ret, err := c.globalClient.NewTestJob(&dashapi.TestPatchRequest{
BugID: origReport.ID,
Link: "http://some-link.com/",
User: "developer@kernel.org",
@@ -947,7 +947,7 @@ func TestExternalPatchFlow(t *testing.T) {
c.expectEQ(ret.ErrorText, "")
// Make sure the job will be passed to the job processor.
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.KernelRepo, "git://git.git/git.git")
c.expectEQ(pollResp.KernelBranch, "kernel-branch")
@@ -962,11 +962,11 @@ func TestExternalPatchFlow(t *testing.T) {
CrashLog: []byte("test crash log"),
CrashReport: []byte("test crash report"),
}
- err = c.client2.JobDone(jobDoneReq)
+ err = c.globalClient.JobDone(jobDoneReq)
c.expectOK(err)
// Verify that we do get the bug update about the completed request.
- jobDoneUpdates, err := client.ReportingPollBugs("test")
+ jobDoneUpdates, err := c.globalClient.ReportingPollBugs("test")
c.expectOK(err)
c.expectEQ(len(jobDoneUpdates.Reports), 1)
@@ -976,7 +976,7 @@ func TestExternalPatchFlow(t *testing.T) {
c.expectEQ(newReport.Report, []byte("test crash report"))
// Confirm the patch testing result.
- reply, _ = client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
JobID: pollResp.ID,
Status: dashapi.BugStatusOpen,
@@ -999,12 +999,12 @@ func TestExternalPatchTestError(t *testing.T) {
client.ReportCrash(crash)
// Confirm the report.
- reports, err := client.ReportingPollBugs("test")
+ reports, err := c.globalClient.ReportingPollBugs("test")
origReport := reports.Reports[0]
c.expectOK(err)
c.expectEQ(len(reports.Reports), 1)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
Status: dashapi.BugStatusOpen,
})
@@ -1012,7 +1012,7 @@ func TestExternalPatchTestError(t *testing.T) {
client.expectEQ(reply.OK, true)
// Create a new patch testing job.
- ret, err := client.NewTestJob(&dashapi.TestPatchRequest{
+ ret, err := c.globalClient.NewTestJob(&dashapi.TestPatchRequest{
BugID: origReport.ID,
User: "developer@kernel.org",
Branch: "kernel-branch",
@@ -1038,12 +1038,12 @@ func TestExternalPatchCompletion(t *testing.T) {
client.ReportCrash(crash)
// Confirm the report.
- reports, err := client.ReportingPollBugs("test")
+ reports, err := c.globalClient.ReportingPollBugs("test")
origReport := reports.Reports[0]
c.expectOK(err)
c.expectEQ(len(reports.Reports), 1)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
Status: dashapi.BugStatusOpen,
})
@@ -1051,7 +1051,7 @@ func TestExternalPatchCompletion(t *testing.T) {
client.expectEQ(reply.OK, true)
// Create a new patch testing job.
- ret, err := client.NewTestJob(&dashapi.TestPatchRequest{
+ ret, err := c.globalClient.NewTestJob(&dashapi.TestPatchRequest{
BugID: origReport.ID,
User: "developer@kernel.org",
Patch: []byte(sampleGitPatch),
@@ -1060,7 +1060,7 @@ func TestExternalPatchCompletion(t *testing.T) {
c.expectEQ(ret.ErrorText, "")
// Make sure branch and repo are correct.
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.KernelRepo, build.KernelRepo)
c.expectEQ(pollResp.KernelBranch, build.KernelBranch)
}
@@ -1079,12 +1079,12 @@ func TestParallelJobs(t *testing.T) {
client.ReportCrash(crash)
// Confirm the report.
- reports, err := client.ReportingPollBugs("test")
+ reports, err := c.globalClient.ReportingPollBugs("test")
origReport := reports.Reports[0]
c.expectOK(err)
c.expectEQ(len(reports.Reports), 1)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
Status: dashapi.BugStatusOpen,
})
@@ -1104,48 +1104,48 @@ func TestParallelJobs(t *testing.T) {
Repo: repo1,
Patch: []byte(sampleGitPatch),
}
- ret, err := client.NewTestJob(testPatchReq)
+ ret, err := c.globalClient.NewTestJob(testPatchReq)
c.expectOK(err)
c.expectEQ(ret.ErrorText, "")
// Make sure the job will be passed to the job processor.
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.KernelRepo, repo1)
// This job is already taken, there are no other jobs.
- emptyPollResp := client.pollJobs(build.Manager)
+ emptyPollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(emptyPollResp, &dashapi.JobPollResp{})
// Create another job.
testPatchReq.Repo = repo2
- ret, err = client.NewTestJob(testPatchReq)
+ ret, err = c.globalClient.NewTestJob(testPatchReq)
c.expectOK(err)
c.expectEQ(ret.ErrorText, "")
// Make sure the new job will be passed to the job processor.
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.KernelRepo, repo2)
// .. and then there'll be no other jobs.
- emptyPollResp = client.pollJobs(build.Manager)
+ emptyPollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(emptyPollResp, &dashapi.JobPollResp{})
// Emulate a syz-ci restart.
- client.JobReset(&dashapi.JobResetReq{Managers: []string{build.Manager}})
+ c.globalClient.JobReset(&dashapi.JobResetReq{Managers: []string{build.Manager}})
// .. and re-query both jobs.
repos := []string{}
for i := 0; i < 2; i++ {
- pollResp = client.pollJobs(build.Manager)
+ pollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
repos = append(repos, pollResp.KernelRepo)
}
assert.ElementsMatch(t, repos, []string{repo1, repo2}, "two patch testing requests are expected")
// .. but nothing else is to be expected.
- emptyPollResp = client.pollJobs(build.Manager)
+ emptyPollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(emptyPollResp, &dashapi.JobPollResp{})
// Emulate the job's completion.
@@ -1157,12 +1157,12 @@ func TestParallelJobs(t *testing.T) {
CrashLog: []byte("test crash log"),
CrashReport: []byte("test crash report"),
}
- err = client.JobDone(jobDoneReq)
+ err = c.globalClient.JobDone(jobDoneReq)
c.expectOK(err)
- client.pollBugs(1)
+ c.globalClient.pollBugs(1)
// .. and make sure it doesn't appear again.
- emptyPollResp = client.pollJobs(build.Manager)
+ emptyPollResp = c.globalClient.pollJobs(build.Manager)
c.expectEQ(emptyPollResp, &dashapi.JobPollResp{})
}
@@ -1185,7 +1185,7 @@ func TestJobCauseRetry(t *testing.T) {
client.pollEmailBug() // New report.
// Emulate an infra failure.
- resp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
BisectCause: true,
})
client.expectNE(resp.ID, "")
@@ -1195,19 +1195,19 @@ func TestJobCauseRetry(t *testing.T) {
Error: []byte("infra problem"),
Flags: dashapi.BisectResultInfraError,
}
- client.expectOK(client.JobDone(done))
+ client.expectOK(c.globalClient.JobDone(done))
c.expectNoEmail()
// Ensure we don't recreate the job right away.
c.advanceTime(24 * time.Hour)
- resp = client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
+ resp = c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
BisectCause: true,
})
client.expectEQ(resp.ID, "")
// Wait the end of the freeze period.
c.advanceTime(7 * 24 * time.Hour)
- resp = client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
+ resp = c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
BisectCause: true,
})
client.expectNE(resp.ID, "")
@@ -1231,7 +1231,7 @@ func TestJobCauseRetry(t *testing.T) {
},
}
done.Build.ID = resp.ID
- c.expectOK(client.JobDone(done))
+ c.expectOK(c.globalClient.JobDone(done))
msg := c.pollEmailBug()
c.expectTrue(strings.Contains(msg.Body, "syzbot has bisected this issue to:"))
@@ -1257,7 +1257,7 @@ func TestEmailTestCommandNoArgs(t *testing.T) {
c.incomingEmail(sender, "#syz test\n"+sampleGitPatch,
EmailOptFrom("test@requester.com"), EmailOptCC([]string{mailingList}))
c.expectNoEmail()
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.Type, dashapi.JobTestPatch)
c.expectEQ(pollResp.KernelRepo, build.KernelRepo)
c.expectEQ(pollResp.KernelBranch, build.KernelBranch)
@@ -1278,11 +1278,11 @@ func TestAliasPatchTestingJob(t *testing.T) {
client.ReportCrash(crash)
// Confirm the report.
- reports, err := client.ReportingPollBugs("test")
+ reports, err := c.globalClient.ReportingPollBugs("test")
origReport := reports.Reports[0]
c.expectOK(err)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: origReport.ID,
Status: dashapi.BugStatusOpen,
})
@@ -1290,7 +1290,7 @@ func TestAliasPatchTestingJob(t *testing.T) {
client.expectEQ(reply.OK, true)
// Create a new patch testing job.
- _, err = client.NewTestJob(&dashapi.TestPatchRequest{
+ _, err = c.globalClient.NewTestJob(&dashapi.TestPatchRequest{
BugID: origReport.ID,
User: "developer@kernel.org",
Branch: "some-branch",
@@ -1300,7 +1300,7 @@ func TestAliasPatchTestingJob(t *testing.T) {
c.expectOK(err)
// Make sure branch and repo are correct.
- pollResp := c.client2.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectEQ(pollResp.KernelRepo, "git://syzkaller.org")
c.expectEQ(pollResp.KernelBranch, "some-branch")
}
diff --git a/dashboard/app/linux_reporting_test.go b/dashboard/app/linux_reporting_test.go
index 9b1ce697c..14376c716 100644
--- a/dashboard/app/linux_reporting_test.go
+++ b/dashboard/app/linux_reporting_test.go
@@ -69,8 +69,8 @@ func TestFsSubsystemFlow(t *testing.T) {
client.ReportCrash(crash)
// As there's no other information, the bug is left at the first reporting.
- c.client.pollNotifs(0)
- vfsBug := client.pollBug()
+ c.globalClient.pollNotifs(0)
+ vfsBug := c.globalClient.pollBug()
// D. Now report a reproducer for the (C) bug that does image mounting.
// -----------------------------------------
@@ -89,8 +89,8 @@ renameat2(r0, &(0x7f00000004c0)='./file0\x00', r0, &(0x7f0000000500)='./bus/file
client.ReportCrash(crash)
// Check that we're ready for upstreaming.
- c.client.pollNotifs(1)
- client.updateBug(vfsBug.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.pollNotifs(1)
+ c.globalClient.updateBug(vfsBug.ID, dashapi.BugStatusUpstream, "")
// .. and poll the email.
reply = c.pollEmailBug()
c.expectEQ(reply.Subject, "[syzbot] [ntfs3?] WARNING in do_mkdirat")
@@ -127,8 +127,8 @@ func TestVfsSubsystemFlow(t *testing.T) {
client.ReportCrash(crash)
// As there's no other information, the bug is left at the first reporting.
- c.client.pollNotifs(0)
- vfsBug := client.pollBug()
+ c.globalClient.pollNotifs(0)
+ vfsBug := c.globalClient.pollBug()
// B. Now report a reproducer for the (C) bug that does NO image mounting.
// -----------------------------------------
@@ -145,8 +145,8 @@ renameat2(r0, &(0x7f00000004c0)='./file0\x00', r0, &(0x7f0000000500)='./bus/file
client.ReportCrash(crash)
// Check that we're ready for upstreaming.
- c.client.pollNotifs(1)
- client.updateBug(vfsBug.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.pollNotifs(1)
+ c.globalClient.updateBug(vfsBug.ID, dashapi.BugStatusUpstream, "")
// .. and poll the email.
reply := c.pollEmailBug()
c.expectEQ(reply.Subject, "[syzbot] [fs?] WARNING in do_mkdirat2")
diff --git a/dashboard/app/main_test.go b/dashboard/app/main_test.go
index bbca35dae..e98cf601e 100644
--- a/dashboard/app/main_test.go
+++ b/dashboard/app/main_test.go
@@ -63,9 +63,9 @@ func TestOnlyManagerFilter(t *testing.T) {
}
// Invalidate all these bugs.
- polledBugs := client.pollBugs(3)
+ polledBugs := c.globalClient.pollBugs(3)
for _, bug := range polledBugs {
- client.updateBug(bug.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(bug.ID, dashapi.BugStatusInvalid, "")
}
// Verify that the filtering works on the invalid bugs page.
@@ -104,7 +104,7 @@ func TestSubsystemFilterMain(t *testing.T) {
crash2.GuiltyFiles = []string{"b.c"}
client.ReportCrash(crash2)
- client.pollBugs(2)
+ c.globalClient.pollBugs(2)
// Make sure all those bugs are present on the main page.
reply, err := c.AuthGET(AccessAdmin, "/test1")
c.expectOK(err)
@@ -145,9 +145,9 @@ func TestSubsystemFilterTerminal(t *testing.T) {
client.ReportCrash(crash2)
// Invalidate all these bugs.
- polledBugs := client.pollBugs(2)
+ polledBugs := c.globalClient.pollBugs(2)
for _, bug := range polledBugs {
- client.updateBug(bug.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(bug.ID, dashapi.BugStatusInvalid, "")
}
// Verify that the filtering works on the invalid bugs page.
@@ -175,7 +175,7 @@ func TestMainBugFilters(t *testing.T) {
crash1 := testCrash(build1, 1)
crash1.Title = "my-crash-title"
client.ReportCrash(crash1)
- client.pollBugs(1)
+ c.globalClient.pollBugs(1)
// The normal main page.
reply, err := c.AuthGET(AccessAdmin, "/test1")
@@ -205,12 +205,12 @@ func TestSubsystemsList(t *testing.T) {
crash1 := testCrash(build, 1)
crash1.GuiltyFiles = []string{"a.c"}
client.ReportCrash(crash1)
- client.pollBug()
+ c.globalClient.pollBug()
crash2 := testCrash(build, 2)
crash2.GuiltyFiles = []string{"b.c"}
client.ReportCrash(crash2)
- client.updateBug(client.pollBug().ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(c.globalClient.pollBug().ID, dashapi.BugStatusInvalid, "")
_, err := c.AuthGET(AccessUser, "/cron/refresh_subsystems")
c.expectOK(err)
@@ -238,13 +238,13 @@ func TestSubsystemPage(t *testing.T) {
crash1.Title = "test crash title"
crash1.GuiltyFiles = []string{"a.c"}
client.ReportCrash(crash1)
- client.pollBug()
+ c.globalClient.pollBug()
crash2 := testCrash(build, 2)
crash2.GuiltyFiles = []string{"b.c"}
client.ReportCrash(crash2)
crash2.Title = "crash that must not be present"
- client.updateBug(client.pollBug().ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(c.globalClient.pollBug().ID, dashapi.BugStatusInvalid, "")
reply, err := c.AuthGET(AccessAdmin, "/test1/s/subsystemA")
c.expectOK(err)
@@ -319,7 +319,7 @@ func TestAdminJobList(t *testing.T) {
c.advanceTime(24 * time.Hour)
- pollResp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{BisectCause: true})
+ pollResp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{BisectCause: true})
c.expectNE(pollResp.ID, "")
causeJobsLink := "/admin?job_type=1"
diff --git a/dashboard/app/notifications_test.go b/dashboard/app/notifications_test.go
index ec048a844..da94d3f31 100644
--- a/dashboard/app/notifications_test.go
+++ b/dashboard/app/notifications_test.go
@@ -366,17 +366,17 @@ func TestExtNotifUpstreamEmbargo(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
})
c.expectEQ(reply.OK, true)
- c.client.pollNotifs(0)
+ c.globalClient.pollNotifs(0)
c.advanceTime(20 * 24 * time.Hour)
- notif := c.client.pollNotifs(1)[0]
+ notif := c.globalClient.pollNotifs(1)[0]
c.expectEQ(notif.ID, rep.ID)
c.expectEQ(notif.Type, dashapi.BugNotifUpstream)
}
@@ -390,15 +390,15 @@ func TestExtNotifUpstreamOnHold(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Specify fixing commit for the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
OnHold: true,
})
c.expectEQ(reply.OK, true)
c.advanceTime(20 * 24 * time.Hour)
- c.client.pollNotifs(0)
+ c.globalClient.pollNotifs(0)
}
diff --git a/dashboard/app/public_json_api_test.go b/dashboard/app/public_json_api_test.go
index f98c0d3f8..7009f2318 100644
--- a/dashboard/app/public_json_api_test.go
+++ b/dashboard/app/public_json_api_test.go
@@ -109,17 +109,17 @@ func TestJSONAPIIntegration(t *testing.T) {
crash1 := testCrash(build, 1)
c.advanceTime(time.Minute)
c.client.ReportCrash(crash1)
- bugReport1 := c.client.pollBug()
+ bugReport1 := c.globalClient.pollBug()
checkBugPageJSONIs(c, bugReport1.ID, sampleCrashDescr)
crash2 := testCrashWithRepro(build, 2)
c.client.ReportCrash(crash2)
- bugReport2 := c.client.pollBug()
+ bugReport2 := c.globalClient.pollBug()
checkBugPageJSONIs(c, bugReport2.ID, sampleCrashWithReproDescr)
checkBugGroupPageJSONIs(c, "/test1?json=1", sampleOpenBugGroupDescr)
- c.client.ReportingUpdate(&dashapi.BugUpdate{
+ c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: bugReport2.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix1", "foo: fix2"},
@@ -157,11 +157,11 @@ func TestJSONAPIFixCommits(t *testing.T) {
crash1 := testCrash(build1, 1)
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
// Specify fixing commit for the bug.
c.advanceTime(time.Hour)
- c.client.ReportingUpdate(&dashapi.BugUpdate{
+ c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep1.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix1", "foo: fix2"},
@@ -258,9 +258,9 @@ func TestPublicJSONAPI(t *testing.T) {
build := testBuild(1)
client.UploadBuild(build)
client.ReportCrash(testCrashWithRepro(build, 1))
- rep := client.pollBug()
- client.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
- _ = client.pollBug()
+ rep := c.globalClient.pollBug()
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
+ _ = c.globalClient.pollBug()
cli := c.makeAPIClient()
bugs, err := cli.BugGroups("access-public", api.BugGroupAll)
diff --git a/dashboard/app/reporting_test.go b/dashboard/app/reporting_test.go
index 6f4441583..b603955c0 100644
--- a/dashboard/app/reporting_test.go
+++ b/dashboard/app/reporting_test.go
@@ -42,11 +42,11 @@ func TestReportBug(t *testing.T) {
c.client.ReportCrash(crash1)
// Must get no reports for "unknown" type.
- resp, _ := c.client.ReportingPollBugs("unknown")
+ resp, _ := c.globalClient.ReportingPollBugs("unknown")
c.expectEQ(len(resp.Reports), 0)
// Must get a proper report for "test" type.
- resp, _ = c.client.ReportingPollBugs("test")
+ resp, _ = c.globalClient.ReportingPollBugs("test")
c.expectEQ(len(resp.Reports), 1)
rep := resp.Reports[0]
c.expectNE(rep.ID, "")
@@ -104,7 +104,7 @@ func TestReportBug(t *testing.T) {
c.expectEQ(want, rep)
// Since we did not update bug status yet, should get the same report again.
- c.expectEQ(c.client.pollBug(), want)
+ c.expectEQ(c.globalClient.pollBug(), want)
// Now add syz repro and check that we get another bug report.
crash1.ReproOpts = []byte("some opts")
@@ -114,7 +114,7 @@ func TestReportBug(t *testing.T) {
want.ReproSyz = []byte(syzReproPrefix + "#some opts\ngetpid()")
want.ReproOpts = []byte("some opts")
c.client.ReportCrash(crash1)
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
c.expectNE(want.CrashID, rep1.CrashID)
_, dbCrash, _ = c.loadBug(rep.ID)
want.CrashID = rep1.CrashID
@@ -124,7 +124,7 @@ func TestReportBug(t *testing.T) {
want.ReportLink = externalLink(c.ctx, textCrashReport, dbCrash.Report)
c.expectEQ(want, rep1)
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
ReproLevel: dashapi.ReproLevelSyz,
@@ -132,13 +132,13 @@ func TestReportBug(t *testing.T) {
c.expectEQ(reply.OK, true)
// After bug update should not get the report again.
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now close the bug in the first reporting.
- c.client.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
// Check that bug updates for the first reporting fail now.
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{ID: rep.ID, Status: dashapi.BugStatusOpen})
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{ID: rep.ID, Status: dashapi.BugStatusOpen})
c.expectEQ(reply.OK, false)
// Report another crash with syz repro for this bug,
@@ -147,7 +147,7 @@ func TestReportBug(t *testing.T) {
c.client.ReportCrash(crash1)
// Check that we get the report in the second reporting.
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectNE(rep2.ID, "")
c.expectNE(rep2.ID, rep.ID)
want.Type = dashapi.ReportNew
@@ -167,7 +167,7 @@ func TestReportBug(t *testing.T) {
c.expectEQ(want, rep2)
// Check that that we can't upstream the bug in the final reporting.
- reply, _ = c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ = c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep2.ID,
Status: dashapi.BugStatusUpstream,
})
@@ -184,10 +184,10 @@ func TestInvalidBug(t *testing.T) {
crash1 := testCrashWithRepro(build, 1)
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Title, "title1")
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
ReproLevel: dashapi.ReproLevelC,
@@ -195,21 +195,21 @@ func TestInvalidBug(t *testing.T) {
c.expectEQ(reply.OK, true)
{
- closed, _ := c.client.ReportingPollClosed([]string{rep.ID, "foobar"})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep.ID, "foobar"})
c.expectEQ(len(closed), 0)
}
// Mark the bug as invalid.
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
{
- closed, _ := c.client.ReportingPollClosed([]string{rep.ID, "foobar"})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep.ID, "foobar"})
c.expectEQ(len(closed), 1)
c.expectEQ(closed[0], rep.ID)
}
// Now it should not be reported in either reporting.
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now a similar crash happens again.
crash2 := &dashapi.Crash{
@@ -222,7 +222,7 @@ func TestInvalidBug(t *testing.T) {
c.client.ReportCrash(crash2)
// Now it should be reported again.
- rep = c.client.pollBug()
+ rep = c.globalClient.pollBug()
c.expectNE(rep.ID, "")
_, dbCrash, dbBuild := c.loadBug(rep.ID)
want := &dashapi.BugReport{
@@ -292,9 +292,9 @@ func TestReportingQuota(t *testing.T) {
for _, reports := range []int{2, 2, 1, 0, 0} {
c.advanceTime(24 * time.Hour)
- c.client.pollBugs(reports)
+ c.globalClient.pollBugs(reports)
// Out of quota for today, so must get 0 reports.
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
}
@@ -317,24 +317,24 @@ func TestReproReportingQuota(t *testing.T) {
// First report of two.
c.advanceTime(time.Minute)
client.ReportCrash(testCrash(build, 1))
- client.pollBug()
+ c.globalClient.pollBug()
// Second report of two.
c.advanceTime(time.Minute)
crash := testCrash(build, 2)
client.ReportCrash(crash)
- client.pollBug()
+ c.globalClient.pollBug()
// Now we "find" a reproducer.
c.advanceTime(time.Minute)
client.ReportCrash(testCrashWithRepro(build, 1))
// But there's no quota for it.
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Wait a day and the quota appears.
c.advanceTime(time.Hour * 24)
- client.pollBug()
+ c.globalClient.pollBug()
}
// Basic dup scenario: mark one bug as dup of another.
@@ -351,45 +351,45 @@ func TestReportingDup(t *testing.T) {
crash2 := testCrash(build, 2)
c.client.ReportCrash(crash2)
- reports := c.client.pollBugs(2)
+ reports := c.globalClient.pollBugs(2)
rep1 := reports[0]
rep2 := reports[1]
// Dup.
- c.client.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
{
// Both must be reported as open.
- closed, _ := c.client.ReportingPollClosed([]string{rep1.ID, rep2.ID})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep1.ID, rep2.ID})
c.expectEQ(len(closed), 0)
}
// Undup.
- c.client.updateBug(rep2.ID, dashapi.BugStatusOpen, "")
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusOpen, "")
// Dup again.
- c.client.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusDup, rep1.ID)
// Dup crash happens again, new bug must not be created.
c.client.ReportCrash(crash2)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now close the original bug, and check that new bugs for dup are now created.
- c.client.updateBug(rep1.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(rep1.ID, dashapi.BugStatusInvalid, "")
{
// Now both must be reported as closed.
- closed, _ := c.client.ReportingPollClosed([]string{rep1.ID, rep2.ID})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep1.ID, rep2.ID})
c.expectEQ(len(closed), 2)
c.expectEQ(closed[0], rep1.ID)
c.expectEQ(closed[1], rep2.ID)
}
c.client.ReportCrash(crash2)
- rep3 := c.client.pollBug()
+ rep3 := c.globalClient.pollBug()
c.expectEQ(rep3.Title, crash2.Title+" (2)")
// Unduping after the canonical bugs was closed must not work
// (we already created new bug for this report).
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep2.ID,
Status: dashapi.BugStatusOpen,
})
@@ -411,12 +411,12 @@ func TestReportingDupToClosed(t *testing.T) {
crash2 := testCrash(build, 2)
c.client.ReportCrash(crash2)
- reports := c.client.pollBugs(2)
- c.client.updateBug(reports[0].ID, dashapi.BugStatusInvalid, "")
- c.client.updateBug(reports[1].ID, dashapi.BugStatusDup, reports[0].ID)
+ reports := c.globalClient.pollBugs(2)
+ c.globalClient.updateBug(reports[0].ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(reports[1].ID, dashapi.BugStatusDup, reports[0].ID)
c.client.ReportCrash(crash2)
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(rep2.Title, crash2.Title+" (2)")
}
@@ -434,16 +434,16 @@ func TestReportingDupCrossReporting(t *testing.T) {
crash2 := testCrash(build, 2)
c.client.ReportCrash(crash2)
- reports := c.client.pollBugs(2)
+ reports := c.globalClient.pollBugs(2)
rep1 := reports[0]
rep2 := reports[1]
// Upstream second bug.
- c.client.updateBug(rep2.ID, dashapi.BugStatusUpstream, "")
- rep3 := c.client.pollBug()
+ c.globalClient.updateBug(rep2.ID, dashapi.BugStatusUpstream, "")
+ rep3 := c.globalClient.pollBug()
{
- closed, _ := c.client.ReportingPollClosed([]string{rep1.ID, rep2.ID, rep3.ID})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep1.ID, rep2.ID, rep3.ID})
c.expectEQ(len(closed), 1)
c.expectEQ(closed[0], rep2.ID)
}
@@ -462,7 +462,7 @@ func TestReportingDupCrossReporting(t *testing.T) {
for _, cmd := range cmds {
t.Logf("duping %v -> %v", cmd.ID, cmd.DupOf)
cmd.Status = dashapi.BugStatusDup
- reply, _ := c.client.ReportingUpdate(cmd)
+ reply, _ := c.globalClient.ReportingUpdate(cmd)
c.expectEQ(reply.OK, false)
}
// Special case of cross-reporting duping:
@@ -472,7 +472,7 @@ func TestReportingDupCrossReporting(t *testing.T) {
DupOf: rep3.ID,
}
t.Logf("duping %v -> %v", cmd.ID, cmd.DupOf)
- reply, _ := c.client.ReportingUpdate(cmd)
+ reply, _ := c.globalClient.ReportingUpdate(cmd)
c.expectTrue(reply.OK)
}
@@ -490,18 +490,18 @@ func TestReportingDupCycle(t *testing.T) {
for i := 0; i < N; i++ {
t.Logf("*************** %v ***************", i)
c.client.ReportCrash(testCrash(build, i))
- reps[i] = c.client.pollBug()
+ reps[i] = c.globalClient.pollBug()
replyError := "Can't dup bug to itself."
if i != 0 {
replyError = "Setting this dup would lead to a bug cycle, cycles are not allowed."
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
Status: dashapi.BugStatusDup,
ID: reps[i-1].ID,
DupOf: reps[i].ID,
})
c.expectEQ(reply.OK, true)
}
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
Status: dashapi.BugStatusDup,
ID: reps[i].ID,
DupOf: reps[0].ID,
@@ -525,7 +525,7 @@ func TestReportingFilter(t *testing.T) {
c.client.ReportCrash(crash1)
// This does not skip first reporting, because it does not have repro.
- rep1 := c.client.pollBug()
+ rep1 := c.globalClient.pollBug()
c.expectEQ(string(rep1.Config), `{"Index":1}`)
crash1.ReproSyz = []byte("getpid()")
@@ -533,13 +533,13 @@ func TestReportingFilter(t *testing.T) {
// This has repro but was already reported to first reporting,
// so repro must go to the first reporting as well.
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectEQ(string(rep2.Config), `{"Index":1}`)
// Now upstream it and it must go to the second reporting.
- c.client.updateBug(rep1.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.updateBug(rep1.ID, dashapi.BugStatusUpstream, "")
- rep3 := c.client.pollBug()
+ rep3 := c.globalClient.pollBug()
c.expectEQ(string(rep3.Config), `{"Index":2}`)
// Now report a bug that must go to the second reporting right away.
@@ -548,7 +548,7 @@ func TestReportingFilter(t *testing.T) {
crash2.ReproSyz = []byte("getpid()")
c.client.ReportCrash(crash2)
- rep4 := c.client.pollBug()
+ rep4 := c.globalClient.pollBug()
c.expectEQ(string(rep4.Config), `{"Index":2}`)
}
@@ -572,7 +572,7 @@ func TestMachineInfo(t *testing.T) {
MachineInfo: machineInfo,
}
c.client.ReportCrash(crash)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(machineInfo, rep.MachineInfo)
// Check that a link to machine information page is created on the dashboard,
@@ -609,12 +609,12 @@ func TestAltTitles1(t *testing.T) {
crash2.AltTitles = []string{crash1.Title}
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Title, crash1.Title)
c.expectEQ(rep.Log, crash1.Log)
c.client.ReportCrash(crash2)
- rep = c.client.pollBug()
+ rep = c.globalClient.pollBug()
c.expectEQ(rep.Title, crash1.Title)
c.expectEQ(rep.Log, crash2.Log)
}
@@ -632,12 +632,12 @@ func TestAltTitles2(t *testing.T) {
crash2.AltTitles = []string{crash1.Title}
c.client.ReportCrash(crash2)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Title, crash2.Title)
c.expectEQ(rep.Log, crash2.Log)
c.client.ReportCrash(crash1)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestAltTitles3(t *testing.T) {
@@ -654,9 +654,9 @@ func TestAltTitles3(t *testing.T) {
crash2.AltTitles = crash1.AltTitles
c.client.ReportCrash(crash1)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
c.client.ReportCrash(crash2)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestAltTitles4(t *testing.T) {
@@ -675,11 +675,11 @@ func TestAltTitles4(t *testing.T) {
crash3.AltTitles = []string{"foobar2"}
c.client.ReportCrash(crash1)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
c.client.ReportCrash(crash2)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
c.client.ReportCrash(crash3)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestAltTitles5(t *testing.T) {
@@ -693,25 +693,25 @@ func TestAltTitles5(t *testing.T) {
crash1 := testCrash(build, 1)
crash1.AltTitles = []string{"foo"}
c.client.ReportCrash(crash1)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
crash2 := testCrash(build, 2)
crash2.Title = "bar"
c.client.ReportCrash(crash2)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
crash3 := testCrash(build, 3)
c.client.ReportCrash(crash3)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
crash3.AltTitles = []string{"bar"}
c.client.ReportCrash(crash3)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
crash := testCrashWithRepro(build, 10)
crash.Title = "foo"
crash.AltTitles = []string{"bar"}
c.client.ReportCrash(crash)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Title, crash2.Title)
c.expectEQ(rep.Log, crash.Log)
}
@@ -727,30 +727,30 @@ func TestAltTitles6(t *testing.T) {
crash1 := testCrash(build, 1)
crash1.AltTitles = []string{"foo"}
c.client.ReportCrash(crash1)
- rep := c.client.pollBug()
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ rep := c.globalClient.pollBug()
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
c.client.ReportCrash(crash1)
- c.client.pollBug()
+ c.globalClient.pollBug()
crash2 := testCrash(build, 2)
crash2.Title = "bar"
c.client.ReportCrash(crash2)
- rep = c.client.pollBug()
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ rep = c.globalClient.pollBug()
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
c.advanceTime(24 * time.Hour)
crash3 := testCrash(build, 3)
c.client.ReportCrash(crash3)
- c.client.pollBugs(1)
+ c.globalClient.pollBugs(1)
crash3.AltTitles = []string{"foo"}
c.client.ReportCrash(crash3)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
crash := testCrashWithRepro(build, 10)
crash.Title = "foo"
crash.AltTitles = []string{"bar"}
c.client.ReportCrash(crash)
- rep = c.client.pollBug()
+ rep = c.globalClient.pollBug()
c.expectEQ(rep.Title, crash1.Title+" (2)")
c.expectEQ(rep.Log, crash.Log)
}
@@ -767,28 +767,28 @@ func TestAltTitles7(t *testing.T) {
crash1 := testCrash(build, 1)
crash1.AltTitles = []string{"foo"}
c.client.ReportCrash(crash1)
- c.client.pollBug()
+ c.globalClient.pollBug()
// This will be merged into crash1.
crash2 := testCrash(build, 2)
crash2.AltTitles = []string{"foo"}
c.client.ReportCrash(crash2)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now report a better candidate.
crash3 := testCrash(build, 3)
crash3.Title = "aaa"
c.client.ReportCrash(crash3)
- c.client.pollBug()
+ c.globalClient.pollBug()
crash3.AltTitles = []string{crash2.Title}
c.client.ReportCrash(crash3)
- c.client.pollBugs(0)
+ c.globalClient.pollBugs(0)
// Now report crash2 with a repro and ensure that it's still merged into crash1.
crash2.ReproOpts = []byte("some opts")
crash2.ReproSyz = []byte("getpid()")
c.client.ReportCrash(crash2)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(rep.Title, crash1.Title)
c.expectEQ(rep.Log, crash2.Log)
}
@@ -804,14 +804,14 @@ func TestDetachExternalTracker(t *testing.T) {
c.client.ReportCrash(crash1)
// Get single report for "test" type.
- resp, _ := c.client.ReportingPollBugs("test")
+ resp, _ := c.globalClient.ReportingPollBugs("test")
c.expectEQ(len(resp.Reports), 1)
rep1 := resp.Reports[0]
c.expectNE(rep1.ID, "")
c.expectEQ(string(rep1.Config), `{"Index":1}`)
// Signal detach_reporting for current bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep1.ID,
Status: dashapi.BugStatusUpstream,
ReproLevel: dashapi.ReproLevelNone,
@@ -826,11 +826,11 @@ func TestDetachExternalTracker(t *testing.T) {
c.client.ReportCrash(crash1)
// Fetch bug and check reporting path (Config) is different.
- rep2 := c.client.pollBug()
+ rep2 := c.globalClient.pollBug()
c.expectNE(rep2.ID, "")
c.expectEQ(string(rep2.Config), `{"Index":2}`)
- closed, _ := c.client.ReportingPollClosed([]string{rep1.ID, rep2.ID})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep1.ID, rep2.ID})
c.expectEQ(len(closed), 1)
c.expectEQ(closed[0], rep1.ID)
}
@@ -994,7 +994,7 @@ func TestFullBugInfo(t *testing.T) {
crashStrace.Flags = dashapi.CrashUnderStrace
crashStrace.Report = []byte("with strace")
c.client.ReportCrash(crashStrace)
- rep := c.client.pollBug()
+ rep := c.globalClient.pollBug()
// Newer: just with repro.
c.advanceTime(24 * 7 * time.Hour)
@@ -1004,7 +1004,7 @@ func TestFullBugInfo(t *testing.T) {
c.client.ReportCrash(crashRepro)
// Ensure we have some bisect jobs done.
- pollResp := c.client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
jobID := pollResp.ID
done := &dashapi.JobDoneReq{
@@ -1021,8 +1021,8 @@ func TestFullBugInfo(t *testing.T) {
},
},
}
- c.client.expectOK(c.client.JobDone(done))
- c.client.pollBug()
+ c.client.expectOK(c.globalClient.JobDone(done))
+ c.globalClient.pollBug()
// Yet newer: no repro.
c.advanceTime(24 * 7 * time.Hour)
@@ -1067,7 +1067,7 @@ For more options, visit https://groups.google.com/d/optout.
_, otherExtBugID, _ := email.RemoveAddrContext(otherPollMsg.Sender)
// Query the full bug info.
- info, err := c.client.LoadFullBug(&dashapi.LoadFullBugReq{BugID: rep.ID})
+ info, err := c.globalClient.LoadFullBug(&dashapi.LoadFullBugReq{BugID: rep.ID})
c.expectOK(err)
if info.BisectCause == nil {
t.Fatalf("info.BisectCause is empty")
@@ -1101,7 +1101,7 @@ func TestUpdateReportApi(t *testing.T) {
// Report a crash.
c.client.ReportCrash(testCrashWithRepro(build, 1))
- c.client.pollBug()
+ c.globalClient.pollBug()
listResp, err := c.client.BugList()
c.expectOK(err)
@@ -1144,15 +1144,15 @@ func TestReportDecommissionedBugs(t *testing.T) {
crash := testCrash(build, 1)
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
- closed, _ := client.ReportingPollClosed([]string{rep.ID})
+ closed, _ := c.globalClient.ReportingPollClosed([]string{rep.ID})
c.expectEQ(len(closed), 0)
// And now let's decommission the namespace.
c.decommission(rep.Namespace)
- closed, _ = client.ReportingPollClosed([]string{rep.ID})
+ closed, _ = c.globalClient.ReportingPollClosed([]string{rep.ID})
c.expectEQ(len(closed), 1)
c.expectEQ(closed[0], rep.ID)
}
@@ -1240,14 +1240,14 @@ func TestReportRevokedRepro(t *testing.T) {
crash.ReproOpts = []byte("repro opts")
crash.ReproSyz = []byte("repro syz")
client.ReportCrash(crash)
- rep1 := client.pollBug()
+ rep1 := c.globalClient.pollBug()
client.expectNE(rep1.ReproSyz, nil)
// Revoke the reproducer.
c.advanceTime(c.config().Obsoleting.ReproRetestStart + time.Hour)
- jobResp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
+ jobResp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{TestPatches: true})
c.expectEQ(jobResp.Type, dashapi.JobTestPatch)
- client.expectOK(client.JobDone(&dashapi.JobDoneReq{
+ client.expectOK(c.globalClient.JobDone(&dashapi.JobDoneReq{
ID: jobResp.ID,
}))
@@ -1256,15 +1256,15 @@ func TestReportRevokedRepro(t *testing.T) {
// Upstream the bug.
c.advanceTime(time.Hour)
- client.updateBug(rep1.ID, dashapi.BugStatusUpstream, "")
- rep2 := client.pollBug()
+ c.globalClient.updateBug(rep1.ID, dashapi.BugStatusUpstream, "")
+ rep2 := c.globalClient.pollBug()
// Also ensure that we do not report the revoked reproducer.
client.expectEQ(rep2.Type, dashapi.ReportNew)
client.expectEQ(rep2.ReproSyz, []byte(nil))
// Expect no further reports.
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestWaitForRepro(t *testing.T) {
@@ -1279,28 +1279,28 @@ func TestWaitForRepro(t *testing.T) {
// Normal crash without repro.
client.ReportCrash(testCrash(build, 1))
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
c.advanceTime(time.Hour * 24)
- client.pollBug()
+ c.globalClient.pollBug()
// A crash first without repro, then with it.
client.ReportCrash(testCrash(build, 2))
c.advanceTime(time.Hour * 12)
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
client.ReportCrash(testCrashWithRepro(build, 2))
- client.pollBug()
+ c.globalClient.pollBug()
// A crash with a reproducer.
c.advanceTime(time.Minute)
client.ReportCrash(testCrashWithRepro(build, 3))
- client.pollBug()
+ c.globalClient.pollBug()
// A crash that will never have a reproducer.
c.advanceTime(time.Minute)
crash := testCrash(build, 4)
crash.Title = "upstream test error: abcd"
client.ReportCrash(crash)
- client.pollBug()
+ c.globalClient.pollBug()
}
// The test mimics the failure described in #5829.
@@ -1320,7 +1320,7 @@ func TestReportRevokedBisectCrash(t *testing.T) {
client.ReportCrash(crashRepro)
// Do a bisection.
- pollResp := client.pollJobs(build.Manager)
+ pollResp := c.globalClient.pollJobs(build.Manager)
c.expectNE(pollResp.ID, "")
c.expectEQ(pollResp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
@@ -1336,23 +1336,23 @@ func TestReportRevokedBisectCrash(t *testing.T) {
},
},
}
- client.expectOK(client.JobDone(done))
- report := client.pollBug()
+ client.expectOK(c.globalClient.JobDone(done))
+ report := c.globalClient.pollBug()
// Revoke the reproducer.
c.advanceTime(c.config().Obsoleting.ReproRetestStart + time.Hour)
- resp := client.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
+ resp := c.globalClient.pollSpecificJobs(build.Manager, dashapi.ManagerJobs{
TestPatches: true,
})
c.expectEQ(resp.Type, dashapi.JobTestPatch)
- client.expectOK(client.JobDone(&dashapi.JobDoneReq{
+ client.expectOK(c.globalClient.JobDone(&dashapi.JobDoneReq{
ID: resp.ID,
}))
// Move to the next reporting stage.
c.advanceTime(time.Hour)
- client.updateBug(report.ID, dashapi.BugStatusUpstream, "")
- report = client.pollBug()
+ c.globalClient.updateBug(report.ID, dashapi.BugStatusUpstream, "")
+ report = c.globalClient.pollBug()
client.expectNE(report.ReproCLink, "")
client.expectEQ(report.ReproIsRevoked, true)
@@ -1366,7 +1366,7 @@ func TestReportRevokedBisectCrash(t *testing.T) {
// There should be no new report.
// We already reported that the bug has a reproducer.
- client.pollBugs(0)
+ c.globalClient.pollBugs(0)
}
func TestCoverageRegression(t *testing.T) {
@@ -1430,41 +1430,41 @@ func TestSkipStage(t *testing.T) {
{
// Normal scenario - manual upstreaming.
client.ReportCrash(testCrash(build, 1))
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":1}`)
- c.client.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
- client.pollNotifs(0)
- rep = client.pollBug()
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusUpstream, "")
+ c.globalClient.pollNotifs(0)
+ rep = c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":3}`)
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
}
{
// Auto-upstreamed.
client.ReportCrash(testCrash(build, 2))
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":1}`)
c.advanceTime(5 * 24 * time.Hour)
- notifs := client.pollNotifs(1)
- reply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ notifs := c.globalClient.pollNotifs(1)
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: notifs[0].ID,
Status: dashapi.BugStatusUpstream,
Notification: true,
})
c.expectEQ(reply.OK, true)
- rep = client.pollBug()
+ rep = c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":2}`)
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
}
{
// Manually invalidated.
client.ReportCrash(testCrash(build, 3))
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":1}`)
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
- client.pollNotifs(0)
- client.pollBugs(0)
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.pollNotifs(0)
+ c.globalClient.pollBugs(0)
}
{
@@ -1472,10 +1472,10 @@ func TestSkipStage(t *testing.T) {
crash := testCrash(build, 4)
crash.Title = "skip reporting1"
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
c.expectEQ(string(rep.Config), `{"Index":2}`)
// If we do react, there would be an upstreaming notification.
- client.pollNotifs(0)
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ c.globalClient.pollNotifs(0)
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
}
}
diff --git a/dashboard/app/repro_test.go b/dashboard/app/repro_test.go
index 1b2aadb9d..98ae2951d 100644
--- a/dashboard/app/repro_test.go
+++ b/dashboard/app/repro_test.go
@@ -62,7 +62,7 @@ func testNeedRepro1(t *testing.T, crashCtor func(c *Ctx) *dashapi.Crash, newBug
resp, _ = c.client.ReportCrash(crash2)
c.expectEQ(resp.NeedRepro, false)
if newBug {
- c.client.pollBug()
+ c.globalClient.pollBug()
}
}
@@ -86,7 +86,7 @@ func testNeedRepro2(t *testing.T, crashCtor func(c *Ctx) *dashapi.Crash, newBug
needRepro, _ := c.client.NeedRepro(testCrashID(crash1))
c.expectEQ(needRepro, false)
if newBug {
- c.client.pollBug()
+ c.globalClient.pollBug()
}
}
@@ -139,7 +139,7 @@ func normalCrash(c *Ctx) *dashapi.Crash {
c.client.UploadBuild(build)
crash := testCrash(build, 1)
c.client.ReportCrash(crash)
- c.client.pollBug()
+ c.globalClient.pollBug()
return crash
}
@@ -149,8 +149,8 @@ func dupCrash(c *Ctx) *dashapi.Crash {
c.client.ReportCrash(testCrash(build, 1))
crash2 := testCrash(build, 2)
c.client.ReportCrash(crash2)
- reports := c.client.pollBugs(2)
- c.client.updateBug(reports[1].ID, dashapi.BugStatusDup, reports[0].ID)
+ reports := c.globalClient.pollBugs(2)
+ c.globalClient.updateBug(reports[1].ID, dashapi.BugStatusDup, reports[0].ID)
return crash2
}
@@ -173,12 +173,12 @@ func closedCrashImpl(c *Ctx, withRepro bool) *dashapi.Crash {
resp, _ := c.client.ReportCrash(crash)
c.expectEQ(resp.NeedRepro, !withRepro)
- rep := c.client.pollBug()
- c.client.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
+ rep := c.globalClient.pollBug()
+ c.globalClient.updateBug(rep.ID, dashapi.BugStatusInvalid, "")
crash.ReproC = nil
c.client.ReportCrash(crash)
- c.client.pollBug()
+ c.globalClient.pollBug()
return crash
}
@@ -359,10 +359,10 @@ func TestFailedReproLogs(t *testing.T) {
}
c.client.ReportCrash(crash1)
- resp, _ := c.client.ReportingPollBugs("test")
+ resp, _ := c.globalClient.ReportingPollBugs("test")
c.expectEQ(len(resp.Reports), 1)
rep := resp.Reports[0]
- c.client.ReportingUpdate(&dashapi.BugUpdate{
+ c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
})
@@ -414,12 +414,12 @@ func TestLogToReproduce(t *testing.T) {
build2 := testBuild(2)
client.UploadBuild(build2)
client.ReportCrash(testCrash(build2, 3))
- client.pollBug()
+ c.globalClient.pollBug()
// Bug with a reproducer.
crash1 := testCrashWithRepro(build, 1)
client.ReportCrash(crash1)
- client.pollBug()
+ c.globalClient.pollBug()
resp, err := client.LogToRepro(&dashapi.LogToReproReq{BuildID: "build1"})
c.expectOK(err)
c.expectEQ(resp.CrashLog, []byte(nil))
@@ -432,7 +432,7 @@ func TestLogToReproduce(t *testing.T) {
Report: []byte("report2"),
}
client.ReportCrash(crash2)
- client.pollBug()
+ c.globalClient.pollBug()
resp, err = client.LogToRepro(&dashapi.LogToReproReq{BuildID: "build1"})
c.expectOK(err)
c.expectEQ(resp.Title, "title2")
@@ -472,7 +472,7 @@ func TestReproForDifferentCrash(t *testing.T) {
Report: []byte("report1"),
}
client.ReportCrash(crash)
- oldBug := client.pollBug()
+ oldBug := c.globalClient.pollBug()
// Now we have "found" a reproducer with a different title.
crash.Title = "new title"
@@ -481,7 +481,7 @@ func TestReproForDifferentCrash(t *testing.T) {
crash.ReproLog = []byte("repro log")
crash.OriginalTitle = "title1"
client.ReportCrash(crash)
- client.pollBug()
+ c.globalClient.pollBug()
// Ensure that we have saved the reproduction log in this case.
dbBug, _, _ := c.loadBug(oldBug.ID)
diff --git a/dashboard/app/subsystem_test.go b/dashboard/app/subsystem_test.go
index 845bc41f3..da375a59a 100644
--- a/dashboard/app/subsystem_test.go
+++ b/dashboard/app/subsystem_test.go
@@ -47,7 +47,7 @@ func TestPeriodicSubsystemRefresh(t *testing.T) {
crash.Title = "WARNING: abcd"
crash.GuiltyFiles = []string{"test.c"}
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
extID := rep.ID
// Initially there should be no subsystems.
@@ -90,7 +90,7 @@ func TestOpenBugRevRefresh(t *testing.T) {
crash := testCrash(build, 1)
crash.GuiltyFiles = []string{"test.c"}
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
extID := rep.ID
// Initially there should be no subsystems.
@@ -126,11 +126,11 @@ func TestClosedBugSubsystemRefresh(t *testing.T) {
crash := testCrash(build, 1)
crash.GuiltyFiles = []string{"test.c"}
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
extID := rep.ID
// "Fix" the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix the crash"},
@@ -140,7 +140,7 @@ func TestClosedBugSubsystemRefresh(t *testing.T) {
build2.Manager = build.Manager
build2.Commits = []string{"foo: fix the crash"}
client.UploadBuild(build2)
- client.pollNotifs(0)
+ c.globalClient.pollNotifs(0)
bug, _, _ := c.loadBug(rep.ID)
c.expectEQ(bug.Status, BugStatusFixed)
@@ -175,11 +175,11 @@ func TestInvalidBugSubsystemRefresh(t *testing.T) {
crash := testCrash(build, 1)
crash.GuiltyFiles = []string{"test.c"}
client.ReportCrash(crash)
- rep := client.pollBug()
+ rep := c.globalClient.pollBug()
extID := rep.ID
// Invalidate the bug.
- reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{
+ reply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: rep.ID,
Status: dashapi.BugStatusInvalid,
})
@@ -347,7 +347,7 @@ func TestPeriodicSubsystemReminders(t *testing.T) {
crash.Title = `WARNING: a third, keep in moderation` // see the config in app_test.go
crash.GuiltyFiles = []string{"a.c"}
client.ReportCrash(crash)
- client.pollBug()
+ c.globalClient.pollBug()
c.advanceTime(time.Hour)
_, err := c.GET("/cron/subsystem_reports")
@@ -551,7 +551,7 @@ func TestSubsystemReportGeneration(t *testing.T) {
client.ReportCrash(aFixed)
bugToExtID[aFixed.Title] = client.pollEmailExtID()
c.advanceTime(time.Hour)
- updReply, _ := client.ReportingUpdate(&dashapi.BugUpdate{
+ updReply, _ := c.globalClient.ReportingUpdate(&dashapi.BugUpdate{
ID: bugToExtID[aFixed.Title],
Status: dashapi.BugStatusOpen,
FixCommits: []string{"foo: fix1"},
@@ -762,7 +762,7 @@ func TestNoRemindersWithDiscussions(t *testing.T) {
client.ReportCrash(aThird)
// Add a recent discussion to the second bug.
- c.expectOK(client.SaveDiscussion(&dashapi.SaveDiscussionReq{
+ c.expectOK(c.globalClient.SaveDiscussion(&dashapi.SaveDiscussionReq{
Discussion: &dashapi.Discussion{
ID: "123",
Source: dashapi.DiscussionLore,
diff --git a/dashboard/app/tree_test.go b/dashboard/app/tree_test.go
index e2e5b8fd0..e4a04d11c 100644
--- a/dashboard/app/tree_test.go
+++ b/dashboard/app/tree_test.go
@@ -118,7 +118,7 @@ report1
---
This report is generated by a bot. It may contain errors.`)
// No notification must be sent.
- c.client.pollNotifs(0)
+ c.globalClient.pollNotifs(0)
}
func TestTreeOriginBetterReport(t *testing.T) {
@@ -235,7 +235,7 @@ func TestTreeOriginLtsBisection(t *testing.T) {
ctx.ctx.advanceTime(time.Hour)
// Expect a cross tree bisection request.
- job := ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job := ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, dashapi.JobBisectFix, job.Type)
assert.Equal(t, "https://upstream.repo/repo", job.KernelRepo)
assert.Equal(t, "upstream-master", job.KernelBranch)
@@ -245,7 +245,7 @@ func TestTreeOriginLtsBisection(t *testing.T) {
ctx.ctx.advanceTime(time.Hour)
// Make sure we don't create the same job twice.
- job2 := ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job2 := ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, "", job2.ID)
ctx.ctx.advanceTime(time.Hour)
@@ -255,18 +255,18 @@ func TestTreeOriginLtsBisection(t *testing.T) {
Log: []byte("bisect log"),
Error: []byte("bisect error"),
}
- c.expectOK(ctx.client.JobDone(done))
+ c.expectOK(ctx.globalClient.JobDone(done))
ctx.ctx.advanceTime(time.Hour)
// Ensure there are no new bisection requests.
- job = ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job = ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, job.ID, "")
// Wait for the cooldown and request the job once more.
ctx.ctx.advanceTime(15 * 24 * time.Hour)
ctx.uploadBug(`https://downstream.repo/repo`, `master`, dashapi.ReproLevelC)
ctx.ctx.advanceTime(15 * 24 * time.Hour)
- job = ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job = ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, job.KernelRepo, "https://upstream.repo/repo")
assert.Equal(t, job.KernelCommit, "badc0ffee")
@@ -294,11 +294,11 @@ func TestTreeOriginLtsBisection(t *testing.T) {
}
done.Build.ID = job.ID
ctx.ctx.advanceTime(time.Hour)
- c.expectOK(ctx.client.JobDone(done))
+ c.expectOK(ctx.globalClient.JobDone(done))
// Ensure the job is no longer created.
ctx.ctx.advanceTime(time.Hour)
- job = ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job = ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, job.ID, "")
msg := ctx.emailWithoutURLs()
@@ -451,7 +451,7 @@ func TestNonfinalFixCandidateBisect(t *testing.T) {
ctx.ctx.advanceTime(time.Hour)
// Ensure the code does not fail.
- job := ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job := ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, "", job.ID)
}
@@ -464,7 +464,7 @@ func TestTreeBisectionBeforeOrigin(t *testing.T) {
ctx.reportToEmail()
// Ensure the job is no longer created.
ctx.ctx.advanceTime(time.Hour)
- job := ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
+ job := ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{BisectFix: true})
assert.Equal(t, "", job.ID)
}
@@ -1000,25 +1000,27 @@ func TestTreeConfigAppend(t *testing.T) {
func setUpTreeTest(ctx *Ctx, repos []KernelRepo) *treeTestCtx {
ret := &treeTestCtx{
- ctx: ctx,
- client: ctx.makeClient(clientTreeTests, keyTreeTests, true),
- manager: "test-manager",
+ ctx: ctx,
+ globalClient: ctx.makeClient(reportingClient, reportingKey, true),
+ client: ctx.makeClient(clientTreeTests, keyTreeTests, true),
+ manager: "test-manager",
}
ret.updateRepos(repos)
return ret
}
type treeTestCtx struct {
- ctx *Ctx
- client *apiClient
- bug *Bug
- bugReport *dashapi.BugReport
- start time.Time
- entries []treeTestEntry
- perAlias map[string]KernelRepo
- jobTestDays []int
- manager string
- validateJob func(*dashapi.JobPollResp)
+ ctx *Ctx
+ globalClient *apiClient
+ client *apiClient
+ bug *Bug
+ bugReport *dashapi.BugReport
+ start time.Time
+ entries []treeTestEntry
+ perAlias map[string]KernelRepo
+ jobTestDays []int
+ manager string
+ validateJob func(*dashapi.JobPollResp)
}
func (ctx *treeTestCtx) now() time.Time {
@@ -1059,7 +1061,7 @@ func (ctx *treeTestCtx) uploadBuildCrash(build *dashapi.Build, lvl dashapi.Repro
}
ctx.client.ReportCrash(crash)
if ctx.bug == nil || ctx.bug.ReproLevel < lvl {
- ctx.bugReport = ctx.client.pollBug()
+ ctx.bugReport = ctx.globalClient.pollBug()
if ctx.bug == nil {
bug, _, err := findBugByReportingID(ctx.ctx.ctx, ctx.bugReport.ID)
ctx.ctx.expectOK(err)
@@ -1091,7 +1093,7 @@ func (ctx *treeTestCtx) moveToDay(tillDay int) {
ctx.ctx.t.Logf("executing jobs on day %d", seqDay)
// Execute jobs until they exist.
for {
- pollResp := ctx.client.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{
+ pollResp := ctx.globalClient.pollSpecificJobs(ctx.manager, dashapi.ManagerJobs{
TestPatches: true,
})
if pollResp.ID == "" {
@@ -1172,7 +1174,7 @@ func (ctx *treeTestCtx) doJob(resp *dashapi.JobPollResp, day int) {
jobDoneReq.Error = []byte("failed to apply patch")
}
found.jobsDone++
- ctx.ctx.expectOK(ctx.client.JobDone(jobDoneReq))
+ ctx.ctx.expectOK(ctx.globalClient.JobDone(jobDoneReq))
}
func (ctx *treeTestCtx) ensureLabels(labels ...string) {
@@ -1201,12 +1203,12 @@ func (ctx *treeTestCtx) bugLink() string {
}
func (ctx *treeTestCtx) reportToEmail() *aemail.Message {
- ctx.client.updateBug(ctx.bugReport.ID, dashapi.BugStatusUpstream, "")
+ ctx.globalClient.updateBug(ctx.bugReport.ID, dashapi.BugStatusUpstream, "")
return ctx.ctx.pollEmailBug()
}
func (ctx *treeTestCtx) fullBugInfo() *dashapi.FullBugInfo {
- info, err := ctx.client.LoadFullBug(&dashapi.LoadFullBugReq{
+ info, err := ctx.globalClient.LoadFullBug(&dashapi.LoadFullBugReq{
BugID: ctx.bugReport.ID,
})
ctx.ctx.expectOK(err)
diff --git a/dashboard/app/util_test.go b/dashboard/app/util_test.go
index d2862bd22..9902e654e 100644
--- a/dashboard/app/util_test.go
+++ b/dashboard/app/util_test.go
@@ -52,6 +52,7 @@ type Ctx struct {
mockedTime time.Time
emailSink chan *aemail.Message
transformContext func(context.Context) context.Context
+ globalClient *apiClient
client *apiClient
client2 *apiClient
publicClient *apiClient
@@ -97,6 +98,7 @@ func newCtx(t *testing.T, appID string) *Ctx {
transformContext: func(ctx context.Context) context.Context { return ctx },
checkAI: appID != "",
}
+ ctx.globalClient = ctx.makeClient(reportingClient, reportingKey, true)
ctx.client = ctx.makeClient(client1, password1, true)
ctx.client2 = ctx.makeClient(client2, password2, true)
ctx.publicClient = ctx.makeClient(clientPublicEmail, keyPublicEmail, true)
@@ -286,7 +288,7 @@ func (ctx *Ctx) Close() {
ctx.t.Errorf("ERROR: leftover email: %v", (<-ctx.emailSink).Body)
}
// No pending external reports (tests need to consume them).
- resp, _ := ctx.client.ReportingPollBugs("test")
+ resp, _ := ctx.globalClient.ReportingPollBugs("test")
for _, rep := range resp.Reports {
ctx.t.Errorf("ERROR: leftover external report:\n%#v", rep)
}