about summary refs log tree commit diff stats
path: root/pkg/image
diff options
context:
space:
mode:
author	Dmitry Vyukov <dvyukov@google.com>	2024-07-05 11:37:34 +0200
committer	Dmitry Vyukov <dvyukov@google.com>	2024-07-05 11:43:19 +0000
commit	e5f6d2961cef719e286f3f5f7f4ab868fc4ba7cd (patch)
tree	0e50c3d141fffd59eb60f2bd7c21fa0440f459ae /pkg/image
parent	c60038a3f5efcf3a117af946fdff6c2c630215df (diff)
pkg/image: provide stats about images
Diffstat (limited to 'pkg/image')
-rw-r--r--	pkg/image/compression.go	9
-rw-r--r--	pkg/image/compression_optimized.go	18
2 files changed, 26 insertions, 1 deletion
diff --git a/pkg/image/compression.go b/pkg/image/compression.go
index d41392019..edc144de2 100644
--- a/pkg/image/compression.go
+++ b/pkg/image/compression.go
@@ -9,6 +9,15 @@ import (
"encoding/base64"
"fmt"
"io"
+ "sync/atomic"
+)
+
+var (
+ // Total amount of images in memory and consumed memory (in bytes).
+ // Currently maintained only by the optimized implementation.
+ // Cannot import stats package due to import cycles.
+ StatImages atomic.Int64
+ StatMemory atomic.Int64
)
func Compress(rawData []byte) []byte {
diff --git a/pkg/image/compression_optimized.go b/pkg/image/compression_optimized.go
index ea6be9569..819debc6e 100644
--- a/pkg/image/compression_optimized.go
+++ b/pkg/image/compression_optimized.go
@@ -22,9 +22,12 @@ type decompressScratch struct {
buf []byte
}
+// This is just for memory consumption estimation, does not need to be precise.
+const pageSize = 4 << 10
+
var decompressPool = sync.Pool{New: func() interface{} {
return &decompressScratch{
- buf: make([]byte, 8<<10),
+ buf: make([]byte, pageSize),
}
}}
@@ -63,11 +66,15 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) {
if err != nil {
panic(err)
}
+ pages := 0
dtor = func() {
+ StatImages.Add(-1)
+ StatMemory.Add(int64(-pages * pageSize))
if err := syscall.Munmap(data[:maxImageSize]); err != nil {
panic(err)
}
}
+ pagedIn := 0
offset := 0
for {
n, err := scratch.zr.Read(scratch.buf)
@@ -92,6 +99,7 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) {
// or whatever is the alignment for such large objects). We could also break from the middle
// of the loop before updating src/dst pointers, but it hurts codegen a lot (compilers like
// canonical loop forms).
+ hasData := false
words := uintptr(n-1) / wordSize
src := (*word)(unsafe.Pointer(&scratch.buf[0]))
dst := (*word)(unsafe.Pointer(&data[offset]))
@@ -101,16 +109,24 @@ func mustDecompress(compressed []byte) (data []byte, dtor func()) {
}
src = (*word)(unsafe.Pointer(uintptr(unsafe.Pointer(src)) + wordSize))
dst = (*word)(unsafe.Pointer(uintptr(unsafe.Pointer(dst)) + wordSize))
+ hasData = true
}
// Copy any remaining trailing bytes.
for i := words * wordSize; i < uintptr(n); i++ {
v := scratch.buf[i]
if v != 0 {
data[uintptr(offset)+i] = v
+ hasData = true
}
}
+ if hasData && offset >= pagedIn {
+ pagedIn = (offset + n + pageSize - 1) & ^(pageSize - 1)
+ pages++
+ }
offset += n
}
data = data[:offset]
+ StatImages.Add(1)
+ StatMemory.Add(int64(pages * pageSize))
return
}