diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2024-07-05 11:37:34 +0200 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2024-07-05 11:43:19 +0000 |
| commit | e5f6d2961cef719e286f3f5f7f4ab868fc4ba7cd (patch) | |
| tree | 0e50c3d141fffd59eb60f2bd7c21fa0440f459ae | |
| parent | c60038a3f5efcf3a117af946fdff6c2c630215df (diff) | |
pkg/image: provide stats about images
| -rw-r--r-- | pkg/image/compression.go | 9 | ||||
| -rw-r--r-- | pkg/image/compression_optimized.go | 18 | ||||
| -rw-r--r-- | syz-manager/stats.go | 11 |
3 files changed, 37 insertions, 1 deletion
diff --git a/pkg/image/compression.go b/pkg/image/compression.go index d41392019..edc144de2 100644 --- a/pkg/image/compression.go +++ b/pkg/image/compression.go @@ -9,6 +9,15 @@ import ( "encoding/base64" "fmt" "io" + "sync/atomic" +) + +var ( + // Total amount of images in memory and consumed memory (in bytes). + // Currently maintained only by the optimized implementation. + // Cannot import stats package due to import cycles. + StatImages atomic.Int64 + StatMemory atomic.Int64 ) func Compress(rawData []byte) []byte { diff --git a/pkg/image/compression_optimized.go b/pkg/image/compression_optimized.go index ea6be9569..819debc6e 100644 --- a/pkg/image/compression_optimized.go +++ b/pkg/image/compression_optimized.go @@ -22,9 +22,12 @@ type decompressScratch struct { buf []byte } +// This is just for memory consumption estimation, does not need to be precise. +const pageSize = 4 << 10 + var decompressPool = sync.Pool{New: func() interface{} { return &decompressScratch{ - buf: make([]byte, 8<<10), + buf: make([]byte, pageSize), } }} @@ -63,11 +66,15 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) { if err != nil { panic(err) } + pages := 0 dtor = func() { + StatImages.Add(-1) + StatMemory.Add(int64(-pages * pageSize)) if err := syscall.Munmap(data[:maxImageSize]); err != nil { panic(err) } } + pagedIn := 0 offset := 0 for { n, err := scratch.zr.Read(scratch.buf) @@ -92,6 +99,7 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) { // or whatever is the alignment for such large objects). We could also break from the middle // of the loop before updating src/dst pointers, but it hurts codegen a lot (compilers like // canonical loop forms). 
+ hasData := false words := uintptr(n-1) / wordSize src := (*word)(unsafe.Pointer(&scratch.buf[0])) dst := (*word)(unsafe.Pointer(&data[offset])) @@ -101,16 +109,24 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) { } src = (*word)(unsafe.Pointer(uintptr(unsafe.Pointer(src)) + wordSize)) dst = (*word)(unsafe.Pointer(uintptr(unsafe.Pointer(dst)) + wordSize)) + hasData = true } // Copy any remaining trailing bytes. for i := words * wordSize; i < uintptr(n); i++ { v := scratch.buf[i] if v != 0 { data[uintptr(offset)+i] = v + hasData = true } } + if hasData && offset >= pagedIn { + pagedIn = (offset + n + pageSize - 1) & ^(pageSize - 1) + pages++ + } offset += n } data = data[:offset] + StatImages.Add(1) + StatMemory.Add(int64(pages * pageSize)) return } diff --git a/syz-manager/stats.go b/syz-manager/stats.go index 0f49c33db..cffa7e4c1 100644 --- a/syz-manager/stats.go +++ b/syz-manager/stats.go @@ -8,6 +8,7 @@ import ( "runtime" "time" + "github.com/google/syzkaller/pkg/image" "github.com/google/syzkaller/pkg/stats" ) @@ -68,5 +69,15 @@ func (mgr *Manager) initStats() { }, func(v int, period time.Duration) string { return fmt.Sprintf("%v MB", v>>20) }) + stats.Create("images memory", "Uncompressed images memory (bytes)", stats.Graph("memory"), + func() int { + return int(image.StatMemory.Load()) + }, func(v int, period time.Duration) string { + return fmt.Sprintf("%v MB", v>>20) + }) + stats.Create("uncompressed images", "Total number of uncompressed images in memory", + func() int { + return int(image.StatImages.Load()) + }) mgr.statCoverFiltered = stats.Create("filtered coverage", "", stats.NoGraph) } |
