diff options
| author | Taras Madan <tarasmadan@google.com> | 2024-12-18 13:33:01 +0100 |
|---|---|---|
| committer | Taras Madan <tarasmadan@google.com> | 2024-12-19 14:38:21 +0000 |
| commit | fef5bb5152a5013d0c4571cd0601f46824a9664b (patch) | |
| tree | c8fc887f6bfe48ae314a55f439ca1e7cbea370c7 | |
| parent | f1c188dc6963bf016692df095d783e4945d84f7b (diff) | |
tools/syz-covermerger: upload coverage as jsonl
The previous implementation stored only the summary of processed records.
The summary was <1GB and single processing node was able to manipulate the data.
Current implementation stores all the details about records read to make post-processing more flexible.
This change was needed to get access to the source manager name and will help to analyze other details.
This new implementation requires 20GB mem to process single day records.
A CSV log interning experiment made it possible to merge using 10GB.
Quarter data aggregation will cost ~100 times more.
The alternative is to use stream processing. We can process data kernel-file-by-file.
It reduces memory consumption by a factor of ~15000.
This approach is implemented here.
We batch coverage signals by file and store the per-file results in a GCS JSONL file.
See https://jsonlines.org/ to learn about jsonl.
| -rw-r--r-- | dashboard/app/api.go | 36 | ||||
| -rw-r--r-- | dashboard/dashapi/dashapi.go | 44 | ||||
| -rw-r--r-- | pkg/cover/file.go | 18 | ||||
| -rw-r--r-- | pkg/coveragedb/spanner.go | 73 | ||||
| -rw-r--r-- | pkg/covermerger/covermerger.go | 41 | ||||
| -rw-r--r-- | pkg/covermerger/covermerger_test.go | 14 | ||||
| -rw-r--r-- | tools/syz-covermerger/syz_covermerger.go | 180 |
7 files changed, 222 insertions, 184 deletions
diff --git a/dashboard/app/api.go b/dashboard/app/api.go index 21ded4bec..ecdecd6bf 100644 --- a/dashboard/app/api.go +++ b/dashboard/app/api.go @@ -1938,37 +1938,27 @@ func apiCreateUploadURL(c context.Context, payload io.Reader) (interface{}, erro } return fmt.Printf("%s/%s.upload", bucket, uuid.New().String()) } + +// apiSaveCoverage reads jsonl data from payload and stores it to coveragedb. +// First payload jsonl line is a coveragedb.HistoryRecord (w/o session and time). +// Second+ records are coveragedb.MergedCoverageRecord. func apiSaveCoverage(c context.Context, payload io.Reader) (interface{}, error) { - req := new(dashapi.SaveCoverageReq) - if err := json.NewDecoder(payload).Decode(req); err != nil { - return nil, fmt.Errorf("failed to unmarshal request: %w", err) + descr := new(coveragedb.HistoryRecord) + if err := json.NewDecoder(payload).Decode(descr); err != nil { + return 0, fmt.Errorf("json.NewDecoder(dashapi.MergedCoverageDescription).Decode: %w", err) } - coverage := req.Coverage var sss []*subsystem.Subsystem - if service := getNsConfig(c, coverage.Namespace).Subsystems.Service; service != nil { + if service := getNsConfig(c, descr.Namespace).Subsystems.Service; service != nil { sss = service.List() - log.Infof(c, "found %d subsystems for %s namespace", len(sss), coverage.Namespace) - } - rowsCreated, err := coveragedb.SaveMergeResult( - context.Background(), - appengine.AppID(context.Background()), - coverage.FileData, - &coveragedb.HistoryRecord{ - Namespace: coverage.Namespace, - Repo: coverage.Repo, - Commit: coverage.Commit, - Duration: coverage.Duration, - DateTo: coverage.DateTo, - }, - coverage.TotalRows, - sss, - ) + log.Infof(c, "found %d subsystems for %s namespace", len(sss), descr.Namespace) + } + rowsCreated, err := coveragedb.SaveMergeResult(c, appengine.AppID(context.Background()), descr, payload, sss) if err != nil { log.Errorf(c, "error storing coverage for ns %s, date %s: %v", - coverage.Namespace, coverage.DateTo.String(), 
err) + descr.Namespace, descr.DateTo.String(), err) } else { log.Infof(c, "updated coverage for ns %s, date %s to %d rows", - coverage.Namespace, coverage.DateTo.String(), coverage.TotalRows) + descr.Namespace, descr.DateTo.String(), descr.TotalRows) } return &rowsCreated, err } diff --git a/dashboard/dashapi/dashapi.go b/dashboard/dashapi/dashapi.go index 0fc3651cc..0d4bee371 100644 --- a/dashboard/dashapi/dashapi.go +++ b/dashboard/dashapi/dashapi.go @@ -8,7 +8,6 @@ package dashapi import ( "bytes" "compress/gzip" - "context" "encoding/json" "fmt" "io" @@ -18,10 +17,7 @@ import ( "reflect" "time" - "cloud.google.com/go/civil" "github.com/google/syzkaller/pkg/auth" - "github.com/google/syzkaller/pkg/coveragedb" - "github.com/google/syzkaller/pkg/gcs" ) type Dashboard struct { @@ -692,42 +688,18 @@ func (dash *Dashboard) SaveDiscussion(req *SaveDiscussionReq) error { return dash.Query("save_discussion", req, nil) } -type MergedCoverage struct { - Namespace string - Repo string - Commit string - Duration int64 - DateTo civil.Date - TotalRows int64 - FileData coveragedb.ManagersCoverage -} - -type SaveCoverageReq struct { - Coverage *MergedCoverage -} - -// SaveCoverage returns amount of records created in db. 
-func (dash *Dashboard) SaveCoverage(req *SaveCoverageReq) (int, error) { +func (dash *Dashboard) CreateUploadURL() (string, error) { uploadURL := new(string) - if err := dash.Query("create_upload_url", req, uploadURL); err != nil { - return 0, fmt.Errorf("create_upload_url: %w", err) - } - - gcsClient, err := gcs.NewClient(context.Background()) - if err != nil { - return 0, fmt.Errorf("gcs.NewClient: %w", err) - } - w, err := gcsClient.FileWriter(*uploadURL) - if err != nil { - return 0, fmt.Errorf("gcsClient.FileWriter: %w", err) - } - defer w.Close() - if err := json.NewEncoder(gzip.NewWriter(w)).Encode(req); err != nil { - return 0, fmt.Errorf("json.NewEncoder(gzip.NewWriter(w)).Encode: %w", err) + if err := dash.Query("create_upload_url", nil, uploadURL); err != nil { + return "", fmt.Errorf("create_upload_url: %w", err) } + return *uploadURL, nil +} +// SaveCoverage returns amount of records created in db. +func (dash *Dashboard) SaveCoverage(gcpURL string) (int, error) { rowsWritten := new(int) - if err := dash.Query("save_coverage", *uploadURL, rowsWritten); err != nil { + if err := dash.Query("save_coverage", gcpURL, rowsWritten); err != nil { return 0, fmt.Errorf("save_coverage: %w", err) } return *rowsWritten, nil diff --git a/pkg/cover/file.go b/pkg/cover/file.go index ff2b7a0ec..2939935b9 100644 --- a/pkg/cover/file.go +++ b/pkg/cover/file.go @@ -78,14 +78,24 @@ func GetMergeResult(c context.Context, ns, repo, forCommit, sourceCommit, filePa return nil, fmt.Errorf("failed to dbReader.Reader: %w", err) } - mergeResult, err := covermerger.MergeCSVData(config, csvReader) - if err != nil { + ch := make(chan *covermerger.FileMergeResult, 1) + if err := covermerger.MergeCSVData(config, csvReader, ch); err != nil { return nil, fmt.Errorf("error merging coverage: %w", err) } - if _, exist := mergeResult[filePath]; !exist { + + var mr *covermerger.MergeResult + select { + case fmr := <-ch: + if fmr != nil { + mr = fmr.MergeResult + } + default: + } + + if mr != 
nil { return nil, fmt.Errorf("no merge result for file %s", filePath) } - return mergeResult[filePath], nil + return mr, nil } func rendResult(content string, coverage *covermerger.MergeResult, renderConfig *CoverageRenderConfig) string { diff --git a/pkg/coveragedb/spanner.go b/pkg/coveragedb/spanner.go index db6d9ac79..e2a197e0c 100644 --- a/pkg/coveragedb/spanner.go +++ b/pkg/coveragedb/spanner.go @@ -4,8 +4,11 @@ package coveragedb import ( + "bufio" "context" + "encoding/json" "fmt" + "io" "os" "sync/atomic" "time" @@ -51,11 +54,6 @@ func NewClient(ctx context.Context, projectID string) (*spanner.Client, error) { return spanner.NewClient(ctx, database) } -type ManagersCoverage map[string]ManagerCoverage - -// ManagerCoverage is a file to coverage mapping. -type ManagerCoverage map[string]*Coverage - type Coverage struct { Instrumented int64 Covered int64 @@ -72,8 +70,14 @@ func (c *Coverage) AddLineHitCount(line, hitCount int) { } } -func SaveMergeResult(ctx context.Context, projectID string, manCovMap ManagersCoverage, - template *HistoryRecord, totalRows int64, sss []*subsystem.Subsystem) (int, error) { +type MergedCoverageRecord struct { + Manager string + FilePath string + FileData *Coverage +} + +func SaveMergeResult(ctx context.Context, projectID string, descr *HistoryRecord, jsonl io.Reader, + sss []*subsystem.Subsystem) (int, error) { client, err := NewClient(ctx, projectID) if err != nil { return 0, fmt.Errorf("spanner.NewClient() failed: %s", err.Error()) @@ -87,25 +91,30 @@ func SaveMergeResult(ctx context.Context, projectID string, manCovMap ManagersCo session := uuid.New().String() mutations := []*spanner.Mutation{} - for manager, covMap := range manCovMap { - for filePath, record := range covMap { - mutations = append(mutations, fileRecordMutation(session, manager, filePath, record)) - subsystems := fileSubsystems(filePath, ssMatcher, ssCache) - mutations = append(mutations, fileSubsystemsMutation(template.Namespace, filePath, subsystems)) - // 
There is a limit on the number of mutations per transaction (80k) imposed by the DB. - // This includes both explicit mutations of the fields (6 fields * 1k records = 6k mutations) - // and implicit index mutations. - // We keep the number of records low enough for the number of explicit mutations * 10 does not exceed the limit. - if len(mutations) > 1000 { - if _, err = client.Apply(ctx, mutations); err != nil { - return rowsCreated, fmt.Errorf("failed to spanner.Apply(inserts): %s", err.Error()) - } - rowsCreated += len(mutations) - mutations = nil + jsonlScanner := bufio.NewScanner(jsonl) + + for jsonlScanner.Scan() { + var mcr MergedCoverageRecord + if err := json.Unmarshal([]byte(jsonlScanner.Text()), &mcr); err != nil { + return rowsCreated, fmt.Errorf("json.Unmarshal(MergedCoverageRecord): %w", err) + } + mutations = append(mutations, fileRecordMutation(session, &mcr)) + subsystems := fileSubsystems(mcr.FilePath, ssMatcher, ssCache) + mutations = append(mutations, fileSubsystemsMutation(descr.Namespace, mcr.FilePath, subsystems)) + // There is a limit on the number of mutations per transaction (80k) imposed by the DB. + // This includes both explicit mutations of the fields (6 fields * 1k records = 6k mutations) + // and implicit index mutations. + // We keep the number of records low enough for the number of explicit mutations * 10 does not exceed the limit. 
+ if len(mutations) > 1000 { + if _, err = client.Apply(ctx, mutations); err != nil { + return rowsCreated, fmt.Errorf("failed to spanner.Apply(inserts): %s", err.Error()) } + rowsCreated += len(mutations) + mutations = nil } } - mutations = append(mutations, historyMutation(session, template, totalRows)) + + mutations = append(mutations, historyMutation(session, descr)) if _, err = client.Apply(ctx, mutations); err != nil { return rowsCreated, fmt.Errorf("failed to spanner.Apply(inserts): %s", err.Error()) } @@ -170,7 +179,7 @@ func ReadLinesHitCount(ctx context.Context, ns, commit, file string, tp TimePeri return res, nil } -func historyMutation(session string, template *HistoryRecord, totalRows int64) *spanner.Mutation { +func historyMutation(session string, template *HistoryRecord) *spanner.Mutation { historyInsert, err := spanner.InsertOrUpdateStruct("merge_history", &HistoryRecord{ Session: session, Time: time.Now(), @@ -179,7 +188,7 @@ func historyMutation(session string, template *HistoryRecord, totalRows int64) * Commit: template.Commit, Duration: template.Duration, DateTo: template.DateTo, - TotalRows: totalRows, + TotalRows: template.TotalRows, }) if err != nil { panic(fmt.Sprintf("failed to spanner.InsertStruct(): %s", err.Error())) @@ -187,15 +196,15 @@ func historyMutation(session string, template *HistoryRecord, totalRows int64) * return historyInsert } -func fileRecordMutation(session, manager, filePath string, record *Coverage) *spanner.Mutation { +func fileRecordMutation(session string, mcr *MergedCoverageRecord) *spanner.Mutation { insert, err := spanner.InsertOrUpdateStruct("files", &FilesRecord{ Session: session, - FilePath: filePath, - Instrumented: record.Instrumented, - Covered: record.Covered, - LinesInstrumented: record.LinesInstrumented, - HitCounts: record.HitCounts, - Manager: manager, + FilePath: mcr.FilePath, + Instrumented: mcr.FileData.Instrumented, + Covered: mcr.FileData.Covered, + LinesInstrumented: 
mcr.FileData.LinesInstrumented, + HitCounts: mcr.FileData.HitCounts, + Manager: mcr.Manager, }) if err != nil { panic(fmt.Sprintf("failed to fileRecordMutation: %v", err)) diff --git a/pkg/covermerger/covermerger.go b/pkg/covermerger/covermerger.go index 394b606de..0f6ff43e4 100644 --- a/pkg/covermerger/covermerger.go +++ b/pkg/covermerger/covermerger.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "strconv" - "sync" "github.com/google/syzkaller/pkg/log" "golang.org/x/exp/maps" @@ -127,13 +126,16 @@ func isSchema(fields, schema []string) bool { return true } -type FilesMergeResults map[string]*MergeResult +type FileMergeResult struct { + FilePath string + *MergeResult +} -func MergeCSVData(config *Config, reader io.Reader) (FilesMergeResults, error) { +func MergeCSVData(config *Config, reader io.Reader, results chan<- *FileMergeResult) error { var schema []string csvReader := csv.NewReader(reader) if fields, err := csvReader.Read(); err != nil { - return nil, fmt.Errorf("failed to read schema: %w", err) + return fmt.Errorf("failed to read schema: %w", err) } else { schema = fields } @@ -163,13 +165,13 @@ func MergeCSVData(config *Config, reader io.Reader) (FilesMergeResults, error) { } errStreamChan <- nil }() - mergeResult, errMerging := mergeChanData(config, recordsChan) + errMerging := mergeChanData(config, recordsChan, results) errStream := <-errStreamChan if errMerging != nil || errStream != nil { - return nil, fmt.Errorf("errors merging stream data:\nmerger err: %w\nstream reader err: %w", + return fmt.Errorf("errors merging stream data:\nmerger err: %w\nstream reader err: %w", errMerging, errStream) } - return mergeResult, nil + return nil } type FileRecords struct { @@ -177,33 +179,26 @@ type FileRecords struct { records []*FileRecord } -func mergeChanData(c *Config, recordChan <-chan *FileRecord) (FilesMergeResults, error) { +func mergeChanData(c *Config, recordChan <-chan *FileRecord, results chan<- *FileMergeResult) error { g, ctx := 
errgroup.WithContext(context.Background()) frecordChan := groupFileRecords(recordChan, ctx) - stat := make(FilesMergeResults) - var mu sync.Mutex + for i := 0; i < c.Jobs; i++ { g.Go(func() error { for frecord := range frecordChan { - if mr, err := batchFileData(c, frecord.fileName, frecord.records); err != nil { + mr, err := batchFileData(c, frecord.fileName, frecord.records) + if err != nil { return fmt.Errorf("failed to batchFileData(%s): %w", frecord.fileName, err) - } else { - mu.Lock() - if _, exist := stat[frecord.fileName]; exist { - mu.Unlock() - return fmt.Errorf("file %s was already processed", frecord.fileName) - } - stat[frecord.fileName] = mr - mu.Unlock() + } + results <- &FileMergeResult{ + FilePath: frecord.fileName, + MergeResult: mr, } } return nil }) } - if err := g.Wait(); err != nil { - return nil, err - } - return stat, nil + return g.Wait() } func groupFileRecords(recordChan <-chan *FileRecord, ctx context.Context) chan FileRecords { diff --git a/pkg/covermerger/covermerger_test.go b/pkg/covermerger/covermerger_test.go index b63ae36be..4580c114f 100644 --- a/pkg/covermerger/covermerger_test.go +++ b/pkg/covermerger/covermerger_test.go @@ -172,7 +172,14 @@ samp_time,1,360,arch,b1,ci-mock,git://repo,master,commit2,not_changed.c,func1,4, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - aggregation, err := MergeCSVData( + ch := make(chan *FileMergeResult) + aggregation := make(map[string]*MergeResult) + go func() { + for fmr := range ch { + aggregation[fmr.FilePath] = fmr.MergeResult + } + }() + err := MergeCSVData( &Config{ Jobs: 2, skipRepoClone: true, @@ -183,10 +190,11 @@ samp_time,1,360,arch,b1,ci-mock,git://repo,master,commit2,not_changed.c,func1,4, FileVersProvider: &fileVersProviderMock{Workdir: test.workdir}, }, strings.NewReader(test.bqTable), + ch, ) assert.Nil(t, err) - var expectedAggregation FilesMergeResults + var expectedAggregation map[string]*MergeResult assert.Nil(t, 
json.Unmarshal([]byte(test.simpleAggregation), &expectedAggregation)) if !test.checkDetails { ignoreLineDetailsInTest(aggregation) @@ -196,7 +204,7 @@ samp_time,1,360,arch,b1,ci-mock,git://repo,master,commit2,not_changed.c,func1,4, } } -func ignoreLineDetailsInTest(results FilesMergeResults) { +func ignoreLineDetailsInTest(results map[string]*MergeResult) { for _, mr := range results { mr.LineDetails = nil } diff --git a/tools/syz-covermerger/syz_covermerger.go b/tools/syz-covermerger/syz_covermerger.go index 2c0ec8845..a58ef5d8a 100644 --- a/tools/syz-covermerger/syz_covermerger.go +++ b/tools/syz-covermerger/syz_covermerger.go @@ -4,20 +4,24 @@ package main import ( + "compress/gzip" "context" + "encoding/json" "flag" "fmt" + "io" "runtime" "slices" - "sort" "cloud.google.com/go/civil" "github.com/google/syzkaller/dashboard/dashapi" "github.com/google/syzkaller/pkg/coveragedb" "github.com/google/syzkaller/pkg/covermerger" + "github.com/google/syzkaller/pkg/gcs" "github.com/google/syzkaller/pkg/log" _ "github.com/google/syzkaller/pkg/subsystem/lists" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" ) var ( @@ -47,6 +51,12 @@ func makeProvider() covermerger.FileVersProvider { } func main() { + if err := do(); err != nil { + log.Fatalf("failed to saveCoverage: %v", err.Error()) + } +} + +func do() error { flag.Parse() config := &covermerger.Config{ Jobs: runtime.NumCPU(), @@ -78,44 +88,92 @@ func main() { if errReader != nil { panic(fmt.Sprintf("failed to dbReader.Reader: %v", errReader.Error())) } - mergeResult, errMerge := covermerger.MergeCSVData(config, csvReader) - if errMerge != nil { - panic(errMerge) + var wc io.WriteCloser + var url string + if *flagToDashAPI != "" { + dash, err := dashapi.New(*flagDashboardClientName, *flagToDashAPI, "") + if err != nil { + return fmt.Errorf("dashapi.New: %w", err) + } + url, err = dash.CreateUploadURL() + if err != nil { + return fmt.Errorf("dash.CreateUploadURL: %w", err) + } + gcsClient, err := 
gcs.NewClient(context.Background()) + if err != nil { + return fmt.Errorf("gcs.NewClient: %w", err) + } + defer gcsClient.Close() + wc, err = gcsClient.FileWriter(url) + if err != nil { + return fmt.Errorf("gcsClient.FileWriter: %w", err) + } + } + eg := errgroup.Group{} + mergeResults := make(chan *covermerger.FileMergeResult) + eg.Go(func() error { + defer close(mergeResults) + if err := covermerger.MergeCSVData(config, csvReader, mergeResults); err != nil { + return fmt.Errorf("covermerger.MergeCSVData: %w", err) + } + return nil + }) + var totalInstrumentedLines, totalCoveredLines int + eg.Go(func() error { + encoder := json.NewEncoder(gzip.NewWriter(wc)) + if encoder != nil { + if err := encoder.Encode(&coveragedb.HistoryRecord{ + Namespace: *flagNamespace, + Repo: *flagRepo, + Commit: *flagCommit, + Duration: *flagDuration, + DateTo: dateTo, + TotalRows: *flagTotalRows, + }); err != nil { + return fmt.Errorf("encoder.Encode(MergedCoverageDescription): %w", err) + } + } + for fileMergeResult := range mergeResults { + dashCoverageRecords := mergedCoverageRecords(fileMergeResult) + if encoder != nil { + for _, record := range dashCoverageRecords { + if err := encoder.Encode(record); err != nil { + return fmt.Errorf("encoder.Encode(MergedCoverageRecord): %w", err) + } + } + } + for _, hitCount := range fileMergeResult.HitCounts { + totalInstrumentedLines++ + if hitCount > 0 { + totalCoveredLines++ + } + } + } + if err := wc.Close(); err != nil { + return fmt.Errorf("wc.Close: %w", err) + } + return nil + }) + if err := eg.Wait(); err != nil { + return fmt.Errorf("eg.Wait: %w", err) } - coverage, totalInstrumentedLines, totalCoveredLines := mergeResultsToCoverage(mergeResult) printCoverage(totalInstrumentedLines, totalCoveredLines) - managers := maps.Keys(coverage) - sort.Strings(managers) - fmt.Printf("merged signals for the following managers: %v\n", managers) if *flagToDashAPI != "" { - if rowsCreated, err := saveCoverage(*flagToDashAPI, 
*flagDashboardClientName, &dashapi.MergedCoverage{ - Namespace: *flagNamespace, - Repo: *flagRepo, - Commit: *flagCommit, - Duration: *flagDuration, - DateTo: dateTo, - TotalRows: *flagTotalRows, - FileData: coverage, - }); err != nil { - log.Fatalf("failed to saveCoverage: %v", err) + dash, err := dashapi.New(*flagDashboardClientName, *flagToDashAPI, "") + if err != nil { + return fmt.Errorf("dashapi.New: %w", err) + } + if rowsCreated, err := dash.SaveCoverage(url); err != nil { + return fmt.Errorf("dash.SaveCoverage: %w", err) } else { fmt.Printf("created %d DB rows\n", rowsCreated) } } + return nil } -func saveCoverage(dashboard, clientName string, d *dashapi.MergedCoverage) (int, error) { - dash, err := dashapi.New(clientName, dashboard, "") - if err != nil { - return 0, fmt.Errorf("dashapi.New: %w", err) - } - return dash.SaveCoverage(&dashapi.SaveCoverageReq{ - Coverage: d, - }) -} - -func printCoverage(instrumented, covered int64) { +func printCoverage(instrumented, covered int) { coverage := 0.0 if instrumented != 0 { coverage = float64(covered) / float64(instrumented) @@ -126,42 +184,38 @@ func printCoverage(instrumented, covered int64) { const allManagers = "*" -// Returns per manager merge result, total instrumented and total covered lines. 
-func mergeResultsToCoverage(mergedCoverage map[string]*covermerger.MergeResult, -) (coveragedb.ManagersCoverage, int64, int64) { - res := make(coveragedb.ManagersCoverage) - res[allManagers] = make(coveragedb.ManagerCoverage) - var totalInstrumented, totalCovered int64 - for fileName, lineStat := range mergedCoverage { - if !lineStat.FileExists { - continue - } - if _, ok := res[allManagers][fileName]; !ok { - res[allManagers][fileName] = &coveragedb.Coverage{} - } - - lines := maps.Keys(lineStat.HitCounts) - slices.Sort(lines) +func mergedCoverageRecords(fmr *covermerger.FileMergeResult, +) []*coveragedb.MergedCoverageRecord { + if !fmr.FileExists { + return nil + } + lines := maps.Keys(fmr.HitCounts) + slices.Sort(lines) + mgrStat := make(map[string]*coveragedb.Coverage) + mgrStat[allManagers] = &coveragedb.Coverage{} - for _, line := range lines { - res[allManagers][fileName].AddLineHitCount(line, lineStat.HitCounts[line]) - managerHitCounts := map[string]int{} - for _, lineDetail := range lineStat.LineDetails[line] { - manager := lineDetail.Manager - managerHitCounts[manager] += lineDetail.HitCount - } - for manager, managerHitCount := range managerHitCounts { - if _, ok := res[manager]; !ok { - res[manager] = make(coveragedb.ManagerCoverage) - } - if _, ok := res[manager][fileName]; !ok { - res[manager][fileName] = &coveragedb.Coverage{} - } - res[manager][fileName].AddLineHitCount(line, managerHitCount) + for _, line := range lines { + mgrStat[allManagers].AddLineHitCount(line, fmr.HitCounts[line]) + managerHitCounts := map[string]int{} + for _, lineDetail := range fmr.LineDetails[line] { + manager := lineDetail.Manager + managerHitCounts[manager] += lineDetail.HitCount + } + for manager, managerHitCount := range managerHitCounts { + if _, ok := mgrStat[manager]; !ok { + mgrStat[manager] = &coveragedb.Coverage{} } + mgrStat[manager].AddLineHitCount(line, managerHitCount) } - totalInstrumented += res[allManagers][fileName].Instrumented - totalCovered += 
res[allManagers][fileName].Covered } - return res, totalInstrumented, totalCovered + + res := []*coveragedb.MergedCoverageRecord{} + for managerName, managerCoverage := range mgrStat { + res = append(res, &coveragedb.MergedCoverageRecord{ + Manager: managerName, + FilePath: fmr.FilePath, + FileData: managerCoverage, + }) + } + return res } |
