aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--dashboard/app/batch_coverage.go (renamed from dashboard/app/coverage_batch.go)108
-rw-r--r--dashboard/app/batch_main.go99
-rw-r--r--dashboard/app/batch_reproexport.go37
-rw-r--r--dashboard/app/config.go4
-rw-r--r--tools/syz-reprolist/external_api.go60
-rw-r--r--tools/syz-reprolist/reprolist.go135
6 files changed, 343 insertions, 100 deletions
diff --git a/dashboard/app/coverage_batch.go b/dashboard/app/batch_coverage.go
index 8a720066c..09f9e71ff 100644
--- a/dashboard/app/coverage_batch.go
+++ b/dashboard/app/batch_coverage.go
@@ -9,23 +9,16 @@ import (
"net/http"
"strconv"
- "cloud.google.com/go/batch/apiv1"
"cloud.google.com/go/batch/apiv1/batchpb"
"cloud.google.com/go/bigquery"
"cloud.google.com/go/civil"
"github.com/google/syzkaller/pkg/coveragedb"
- "github.com/google/uuid"
"google.golang.org/api/iterator"
"google.golang.org/appengine/v2"
"google.golang.org/appengine/v2/log"
- "google.golang.org/protobuf/types/known/durationpb"
)
-func initCoverageBatches() {
- http.HandleFunc("/cron/batch_coverage", handleBatchCoverage)
-}
-
-const batchTimeoutSeconds = 60 * 60 * 12
+const batchCoverageTimeoutSeconds = 60 * 60 * 12
func handleBatchCoverage(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
@@ -73,21 +66,24 @@ func handleBatchCoverage(w http.ResponseWriter, r *http.Request) {
}
periods = coveragedb.AtMostNLatestPeriods(periods, maxSteps)
nsCovConfig := nsConfig.Coverage
- if err := createScriptJob(
- ctx,
- nsCovConfig.BatchProject,
- nsCovConfig.BatchServiceAccount,
- batchScript(ns, repo, branch, periods,
+ serviceAccount := &batchpb.ServiceAccount{
+ Email: nsCovConfig.BatchServiceAccount,
+ Scopes: nsCovConfig.BatchScopes,
+ }
+ if err := createScriptJob(ctx, nsCovConfig.BatchProject, "coverage-merge",
+ batchCoverageScript(ns, repo, branch, periods,
nsCovConfig.JobInitScript,
nsCovConfig.SyzEnvInitScript,
nsCovConfig.DashboardClientName),
- nsCovConfig.BatchScopes); err != nil {
- log.Errorf(ctx, "failed to batchScript: %s", err.Error())
+ batchCoverageTimeoutSeconds,
+ serviceAccount,
+ ); err != nil {
+ log.Errorf(ctx, "failed to batchCoverageScript: %s", err.Error())
}
}
}
-func batchScript(ns, repo, branch string, periods []coveragedb.TimePeriod,
+func batchCoverageScript(ns, repo, branch string, periods []coveragedb.TimePeriod,
jobInitScript, syzEnvInitScript, clientName string) string {
if clientName == "" {
clientName = defaultDashboardClientName
@@ -115,86 +111,6 @@ func batchScript(ns, repo, branch string, periods []coveragedb.TimePeriod,
return script
}
-// from https://cloud.google.com/batch/docs/samples/batch-create-script-job
-func createScriptJob(ctx context.Context, projectID, serviceAccount, script string, scopes []string) error {
- region := "us-central1"
- jobName := fmt.Sprintf("coverage-merge-%s", uuid.New().String())
-
- batchClient, err := batch.NewClient(ctx)
- if err != nil {
- return fmt.Errorf("failed NewClient: %w", err)
- }
- defer batchClient.Close()
-
- taskGroups := []*batchpb.TaskGroup{
- {
- TaskSpec: &batchpb.TaskSpec{
- Runnables: []*batchpb.Runnable{{
- Executable: &batchpb.Runnable_Script_{
- Script: &batchpb.Runnable_Script{Command: &batchpb.Runnable_Script_Text{
- Text: script,
- }},
- },
- }},
- ComputeResource: &batchpb.ComputeResource{
- // CpuMilli is milliseconds per cpu-second. This means the task requires 2 whole CPUs.
- CpuMilli: 4000,
- MemoryMib: 12 * 1024,
- },
- MaxRunDuration: &durationpb.Duration{
- Seconds: batchTimeoutSeconds,
- },
- },
- },
- }
-
- // Policies are used to define on what kind of virtual machines the tasks will run on.
- // In this case, we tell the system to use "e2-standard-4" machine type.
- // Read more about machine types here: https://cloud.google.com/compute/docs/machine-types
- allocationPolicy := &batchpb.AllocationPolicy{
- Instances: []*batchpb.AllocationPolicy_InstancePolicyOrTemplate{{
- PolicyTemplate: &batchpb.AllocationPolicy_InstancePolicyOrTemplate_Policy{
- Policy: &batchpb.AllocationPolicy_InstancePolicy{
- ProvisioningModel: batchpb.AllocationPolicy_SPOT,
- MachineType: "c3-standard-4",
- },
- },
- }},
- ServiceAccount: &batchpb.ServiceAccount{
- Email: serviceAccount,
- Scopes: scopes,
- },
- }
-
- logsPolicy := &batchpb.LogsPolicy{
- Destination: batchpb.LogsPolicy_CLOUD_LOGGING,
- }
-
- // The job's parent is the region in which the job will run.
- parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region)
-
- job := batchpb.Job{
- TaskGroups: taskGroups,
- AllocationPolicy: allocationPolicy,
- LogsPolicy: logsPolicy,
- }
-
- req := &batchpb.CreateJobRequest{
- Parent: parent,
- JobId: jobName,
- Job: &job,
- }
-
- createdJob, err := batchClient.CreateJob(ctx, req)
- if err != nil {
- return fmt.Errorf("unable to create job: %w", err)
- }
-
- log.Infof(ctx, "job created: %v\n", createdJob)
-
- return nil
-}
-
func nsDataAvailable(ctx context.Context, ns string) ([]coveragedb.TimePeriod, []int64, error) {
client, err := bigquery.NewClient(ctx, "syzkaller")
if err != nil {
diff --git a/dashboard/app/batch_main.go b/dashboard/app/batch_main.go
new file mode 100644
index 000000000..acf37ee8f
--- /dev/null
+++ b/dashboard/app/batch_main.go
@@ -0,0 +1,99 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "cloud.google.com/go/batch/apiv1"
+ "cloud.google.com/go/batch/apiv1/batchpb"
+ "github.com/google/uuid"
+ "google.golang.org/appengine/v2/log"
+ "google.golang.org/protobuf/types/known/durationpb"
+)
+
+func initBatchProcessors() {
+ http.HandleFunc("/cron/batch_coverage", handleBatchCoverage)
+ http.HandleFunc("/cron/batch_reproexport", handleBatchReproExport)
+}
+
+// from https://cloud.google.com/batch/docs/samples/batch-create-script-job
+func createScriptJob(ctx context.Context, projectID, jobNamePrefix, script string,
+ timeout int64, sa *batchpb.ServiceAccount) error {
+ region := "us-central1"
+ jobName := fmt.Sprintf("%s-%s", jobNamePrefix, uuid.New().String())
+
+ batchClient, err := batch.NewClient(ctx)
+ if err != nil {
+ return fmt.Errorf("failed NewClient: %w", err)
+ }
+ defer batchClient.Close()
+
+ taskGroups := []*batchpb.TaskGroup{
+ {
+ TaskSpec: &batchpb.TaskSpec{
+ Runnables: []*batchpb.Runnable{{
+ Executable: &batchpb.Runnable_Script_{
+ Script: &batchpb.Runnable_Script{Command: &batchpb.Runnable_Script_Text{
+ Text: script,
+ }},
+ },
+ }},
+ ComputeResource: &batchpb.ComputeResource{
+ // CpuMilli is milliseconds per cpu-second. This means the task requires 4 whole CPUs.
+ CpuMilli: 4000,
+ MemoryMib: 12 * 1024,
+ },
+ MaxRunDuration: &durationpb.Duration{
+ Seconds: timeout,
+ },
+ },
+ },
+ }
+
+ // Policies are used to define on what kind of virtual machines the tasks will run on.
+ // In this case, we tell the system to use "c3-standard-4" machine type.
+ // Read more about machine types here: https://cloud.google.com/compute/docs/machine-types
+ allocationPolicy := &batchpb.AllocationPolicy{
+ Instances: []*batchpb.AllocationPolicy_InstancePolicyOrTemplate{{
+ PolicyTemplate: &batchpb.AllocationPolicy_InstancePolicyOrTemplate_Policy{
+ Policy: &batchpb.AllocationPolicy_InstancePolicy{
+ ProvisioningModel: batchpb.AllocationPolicy_SPOT,
+ MachineType: "c3-standard-4",
+ },
+ },
+ }},
+ ServiceAccount: sa,
+ }
+
+ logsPolicy := &batchpb.LogsPolicy{
+ Destination: batchpb.LogsPolicy_CLOUD_LOGGING,
+ }
+
+ // The job's parent is the region in which the job will run.
+ parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region)
+
+ job := batchpb.Job{
+ TaskGroups: taskGroups,
+ AllocationPolicy: allocationPolicy,
+ LogsPolicy: logsPolicy,
+ }
+
+ req := &batchpb.CreateJobRequest{
+ Parent: parent,
+ JobId: jobName,
+ Job: &job,
+ }
+
+ createdJob, err := batchClient.CreateJob(ctx, req)
+ if err != nil {
+ return fmt.Errorf("unable to create job: %w", err)
+ }
+
+ log.Infof(ctx, "job created: %v\n", createdJob)
+
+ return nil
+}
diff --git a/dashboard/app/batch_reproexport.go b/dashboard/app/batch_reproexport.go
new file mode 100644
index 000000000..845d901fe
--- /dev/null
+++ b/dashboard/app/batch_reproexport.go
@@ -0,0 +1,37 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "net/http"
+
+ "google.golang.org/appengine/v2"
+ "google.golang.org/appengine/v2/log"
+)
+
+const exportTimeoutSeconds = 60 * 60 * 6
+
+func handleBatchReproExport(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ for ns, nsConfig := range getConfig(ctx).Namespaces {
+ if nsConfig.ReproExportPath == "" {
+ continue
+ }
+ if err := createScriptJob(ctx, "syzkaller", "export-repro",
+ exportReproScript(ns, nsConfig.ReproExportPath), exportTimeoutSeconds, nil); err != nil {
+ log.Errorf(ctx, "createScriptJob: %s", err.Error())
+ }
+ }
+}
+
+func exportReproScript(srcNamespace, archivePath string) string {
+ return "\n" +
+ "git clone --depth 1 --branch master --single-branch https://github.com/google/syzkaller\n" +
+ "cd syzkaller\n" +
+ "./tools/syz-env \"" +
+ "go run ./tools/syz-reprolist/... -namespace " + srcNamespace + " && " +
+ "tar -czvf reproducers.tar.gz ./repros/ && " +
+ "gsutil -m cp reproducers.tar.gz " + archivePath +
+ "\""
+}
diff --git a/dashboard/app/config.go b/dashboard/app/config.go
index 0e30c9320..b8d810440 100644
--- a/dashboard/app/config.go
+++ b/dashboard/app/config.go
@@ -124,6 +124,8 @@ type Config struct {
CacheUIPages bool
// Enables coverage aggregation.
Coverage *CoverageConfig
+ // Reproducers export path.
+ ReproExportPath string
}
const defaultDashboardClientName = "coverage-merger"
@@ -393,7 +395,7 @@ func installConfig(cfg *GlobalConfig) {
initHTTPHandlers()
initAPIHandlers()
initKcidb()
- initCoverageBatches()
+ initBatchProcessors()
}
var contextConfigKey = "Updated config (to be used during tests). Use only in tests!"
diff --git a/tools/syz-reprolist/external_api.go b/tools/syz-reprolist/external_api.go
new file mode 100644
index 000000000..36fe4ef68
--- /dev/null
+++ b/tools/syz-reprolist/external_api.go
@@ -0,0 +1,60 @@
+// Copyright 2024 syzkaller project authors. All rights reserved.
+// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "strings"
+)
+
+type bugList struct {
+ Version int
+ Bugs []struct {
+ Link string
+ }
+}
+
+func reproIDFromURL(url string) string {
+ parts := strings.Split(url, "&")
+ if len(parts) != 2 {
+ log.Panicf("can't split %s in two parts by &", url)
+ }
+ parts = strings.Split(parts[1], "=")
+ if len(parts) != 2 {
+ log.Panicf("can't split %s in two parts by =", url)
+ }
+ return parts[1]
+}
+
+func getBugList(jsonBugs []byte) ([]string, error) {
+ var bl bugList
+ if err := json.Unmarshal(jsonBugs, &bl); err != nil {
+ return nil, fmt.Errorf("json.Unmarshal: %w", err)
+ }
+ if bl.Version != 1 {
+ return nil, fmt.Errorf("unsupported export version %d", bl.Version)
+ }
+ res := []string{}
+ for _, b := range bl.Bugs {
+ res = append(res, b.Link)
+ }
+ return res, nil
+}
+
+type BugDetails struct {
+ ID string
+ Crashes []struct {
+ CReproURL string `json:"c-reproducer"`
+ }
+}
+
+func makeBugDetails(jsonDetails []byte) (*BugDetails, error) {
+ var bd BugDetails
+ if err := json.Unmarshal(jsonDetails, &bd); err != nil {
+ return nil, fmt.Errorf("json.Unmarshal: %w", err)
+ }
+ return &bd, nil
+}
diff --git a/tools/syz-reprolist/reprolist.go b/tools/syz-reprolist/reprolist.go
index 3b0aa94d0..6970b2da3 100644
--- a/tools/syz-reprolist/reprolist.go
+++ b/tools/syz-reprolist/reprolist.go
@@ -4,10 +4,15 @@
package main
import (
+ "context"
"flag"
"fmt"
+ "html"
+ "io"
"log"
+ "net/http"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -19,6 +24,7 @@ import (
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/vcs"
"github.com/google/syzkaller/sys/targets"
+ "golang.org/x/sync/errgroup"
)
var (
@@ -28,17 +34,27 @@ var (
flagOutputDir = flag.String("output", "repros", "output dir")
flagSyzkallerDir = flag.String("syzkaller", ".", "syzkaller dir")
flagOS = flag.String("os", runtime.GOOS, "target OS")
+ flagNamespace = flag.String("namespace", "", "target namespace")
+ flagToken = flag.String("token", "", "gcp bearer token to disable throttling (contact syzbot first)\n"+
+ "usage example: ./tools/syz-reprolist -namespace upstream -token $(gcloud auth print-access-token)")
+ flagParallel = flag.Int("j", 2, "number of parallel threads")
)
func main() {
flag.Parse()
+ if err := os.MkdirAll(*flagOutputDir, 0755); err != nil {
+ log.Fatalf("alert: failed to create output dir: %v", err)
+ }
+ if *flagNamespace != "" {
+ if err := exportNamespace(); err != nil {
+ log.Fatalf("alert: error: %s", err.Error())
+ }
+ return
+ }
clients := strings.Split(*flagAPIClients, ",")
if len(clients) == 0 {
log.Fatalf("api client is required")
}
- if err := os.MkdirAll(*flagOutputDir, 0755); err != nil {
- log.Fatalf("failed to create output dir: %v", err)
- }
for _, client := range clients {
log.Printf("processing client %v", client)
dash, err := dashapi.New(client, *flagDashboard, *flagAPIKey)
@@ -272,3 +288,116 @@ func containsCommit(hash string) bool {
_, err := osutil.RunCmd(time.Hour, *flagSyzkallerDir, "git", "merge-base", "--is-ancestor", hash, "HEAD")
return err == nil
}
+
+func exportNamespace() error {
+ bugURLs, err := getFullBugList()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("total %d bugs available\n", len(bugURLs))
+
+ iBugChan := make(chan int)
+ g, _ := errgroup.WithContext(context.Background())
+ for i := 0; i < *flagParallel; i++ {
+ g.Go(func() error {
+ for iBug := range iBugChan {
+ bugURL := *flagDashboard + bugURLs[iBug]
+ bugBody, err := getJSONBody(bugURL)
+ if err != nil {
+ return fmt.Errorf("getJSONBody(%s): %w", bugURL, err)
+ }
+ bugDetails, err := makeBugDetails(bugBody)
+ if err != nil {
+ return fmt.Errorf("makeBugDetails: %w", err)
+ }
+ if cReproURL := bugDetails.Crashes[0].CReproURL; cReproURL != "" { // export max 1 CRepro per bug
+ reproID := reproIDFromURL(cReproURL)
+ fmt.Printf("[%d](%d/%d)saving c-repro %s for bug %s\n",
+ i, iBug, len(bugURLs), reproID, bugDetails.ID)
+ fullReproURL := *flagDashboard + html.UnescapeString(cReproURL)
+ cReproBody, err := getJSONBody(fullReproURL)
+ if err != nil {
+ return fmt.Errorf("getJSONBody(%s): %w", fullReproURL, err)
+ }
+ if err := saveCRepro(reproID, cReproBody); err != nil {
+ return fmt.Errorf("saveCRepro(bugID=%s, reproID=%s): %w", bugDetails.ID, reproID, err)
+ }
+ }
+ }
+ return nil
+ })
+ }
+ errChan := make(chan error)
+ go func() {
+ errChan <- g.Wait()
+ }()
+ for iBug := range bugURLs {
+ select {
+ case iBugChan <- iBug:
+ case err := <-errChan:
+ return err
+ }
+ }
+ close(iBugChan)
+ return g.Wait()
+}
+
+func getFullBugList() ([]string, error) {
+ bugLists := []string{
+ *flagDashboard + "/" + *flagNamespace,
+ *flagDashboard + "/" + *flagNamespace + "/fixed",
+ }
+ fullBugList := []string{}
+ for _, url := range bugLists {
+ fmt.Printf("loading bug list from %s\n", url)
+ body, err := getJSONBody(url)
+ if err != nil {
+ return nil, fmt.Errorf("getBody(%s): %w", url, err)
+ }
+ bugs, err := getBugList(body)
+ if err != nil {
+ return nil, fmt.Errorf("bugList: %w", err)
+ }
+ fullBugList = append(fullBugList, bugs...)
+ }
+ return fullBugList, nil
+}
+
+func getJSONBody(url string) ([]byte, error) {
+ if strings.Contains(url, "?") {
+ url = url + "&json=1"
+ } else {
+ url = url + "?json=1"
+ }
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("http.NewRequest: %w", err)
+ }
+ if *flagToken != "" {
+ req.Header.Add("Authorization", "Bearer "+*flagToken)
+ } else {
+ time.Sleep(time.Second) // tolerate throttling
+ }
+ client := &http.Client{}
+ res, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("client.Do(%s): %w", url, err)
+ }
+ defer res.Body.Close()
+ body, err := io.ReadAll(res.Body)
+ if res.StatusCode > 299 {
+ return nil, fmt.Errorf("request failed with status code: %d and\nbody: %s", res.StatusCode, body)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("io.ReadAll(body): %w", err)
+ }
+ return body, nil
+}
+
+func saveCRepro(reproID string, reproData []byte) error {
+ reproPath := path.Join(*flagOutputDir, reproID+".c")
+ if err := os.WriteFile(reproPath, reproData, 0666); err != nil {
+ return fmt.Errorf("os.WriteFile: %w", err)
+ }
+ return nil
+}