diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2024-04-16 16:43:07 +0200 |
|---|---|---|
| committer | Aleksandr Nogikh <nogikh@google.com> | 2024-04-29 09:54:32 +0000 |
| commit | 537827702dafc1f4308a4ed9c57f52b779406bcf (patch) | |
| tree | 80bd1c086e6c4226b03f3f010d2fbf7dcfc24478 | |
| parent | a4122dfc7e3c556c64b5e07d3995b0e6cb856c27 (diff) | |
tools/syz-testbed: limit the number of graph data points
In long runs, there can be thousands of points for every single graph
that we render. That is much more than is actually needed.
| -rw-r--r-- | tools/syz-testbed/stats.go | 14 |
1 files changed, 13 insertions, 1 deletions
diff --git a/tools/syz-testbed/stats.go b/tools/syz-testbed/stats.go index 1f6bb4443..d45982c2b 100644 --- a/tools/syz-testbed/stats.go +++ b/tools/syz-testbed/stats.go @@ -417,7 +417,19 @@ func (group *RunResultGroup) SaveAvgBenchFile(fileName string) error { return err } defer f.Close() - for _, averaged := range group.AvgStatRecords() { + // In long runs, we collect a lot of stat samples, which results + // in large and slow to load graphs. A subset of 128-256 data points + // seems to be a reasonable enough precision. + records := group.AvgStatRecords() + const targetRecords = 128 + for len(records) > targetRecords*2 { + newRecords := make([]map[string]uint64, 0, len(records)/2) + for i := 0; i < len(records); i += 2 { + newRecords = append(newRecords, records[i]) + } + records = newRecords + } + for _, averaged := range records { data, err := json.MarshalIndent(averaged, "", " ") if err != nil { return err |
