diff options
| author | Taras Madan <tarasmadan@google.com> | 2025-07-31 16:05:38 +0200 |
|---|---|---|
| committer | Taras Madan <tarasmadan@google.com> | 2025-08-07 08:11:10 +0000 |
| commit | 00dc68fb94791fb479cdd1932b9dd6793f14fefd (patch) | |
| tree | 93270e95d5cef33e8c9862b6e81152c47d274b8b /pkg/coveragedb/coveragedb.go | |
| parent | 9a42d6b1e05dfb4fb8867726c63ab9d1bc9d6840 (diff) | |
pkg/coveragedb: update file-to-subsystem info periodically
#6070 explains the problem of data propagation.
1. Add weekly /cron/update_coverdb_subsystems.
2. Stop updating subsystems from coverage receiver API.
Diffstat (limited to 'pkg/coveragedb/coveragedb.go')
| -rw-r--r-- | pkg/coveragedb/coveragedb.go | 57 |
1 file changed, 51 insertions(+), 6 deletions(-)
diff --git a/pkg/coveragedb/coveragedb.go b/pkg/coveragedb/coveragedb.go index 6741e72d1..8272ce6ff 100644 --- a/pkg/coveragedb/coveragedb.go +++ b/pkg/coveragedb/coveragedb.go @@ -85,14 +85,11 @@ type fileSubsystems struct { } func SaveMergeResult(ctx context.Context, client spannerclient.SpannerClient, descr *HistoryRecord, dec *json.Decoder, - sss []*subsystem.Subsystem) (int, error) { +) (int, error) { if client == nil { return 0, fmt.Errorf("nil spannerclient") } var rowsCreated int - ssMatcher := subsystem.MakePathMatcher(sss) - ssCache := make(map[string][]string) - session := uuid.New().String() var mutations []*spanner.Mutation @@ -107,8 +104,6 @@ func SaveMergeResult(ctx context.Context, client spannerclient.SpannerClient, de } if mcr := wr.MCR; mcr != nil { mutations = append(mutations, fileRecordMutation(session, mcr)) - subsystems := getFileSubsystems(mcr.FilePath, ssMatcher, ssCache) - mutations = append(mutations, fileSubsystemsMutation(descr.Namespace, mcr.FilePath, subsystems)) } else if fl := wr.FL; fl != nil { mutations = append(mutations, fileFunctionsMutation(session, fl)) } else { @@ -627,3 +622,53 @@ func UniqCoverage(fullCov, partCov map[int]int64) map[int]int64 { } return res } + +func RegenerateSubsystems(ctx context.Context, ns string, sss []*subsystem.Subsystem, + client spannerclient.SpannerClient) (int, error) { + ssMatcher := subsystem.MakePathMatcher(sss) + ssCache := make(map[string][]string) + filePaths, err := getFilePaths(ctx, ns, client) + if err != nil { + return 0, err + } + var mutations []*spanner.Mutation + for _, filePath := range filePaths { + subsystems := getFileSubsystems(filePath, ssMatcher, ssCache) + mutations = append(mutations, fileSubsystemsMutation(ns, filePath, subsystems)) + } + // There is a limit on the number of mutations per transaction (80k) imposed by the DB. + // Expected mutations count is < 20k and looks safe to do w/o batching. 
+ if _, err = client.Apply(ctx, mutations); err != nil { + return 0, err + } + return len(mutations), nil +} + +func getFilePaths(ctx context.Context, ns string, client spannerclient.SpannerClient) ([]string, error) { + iter := client.Single().Query(ctx, spanner.Statement{ + SQL: `select filepath from file_subsystems where namespace=$1`, + Params: map[string]interface{}{ + "p1": ns, + }, + }) + defer iter.Stop() + + var res []string + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("iter.Next: %w", err) + } + var r struct { + Filepath string + } + if err = row.ToStruct(&r); err != nil { + return nil, fmt.Errorf("row.ToStruct: %w", err) + } + res = append(res, r.Filepath) + } + return res, nil +} |
