aboutsummaryrefslogtreecommitdiffstats
path: root/pkg/kcov/kcov.go
blob: 0400a32ff05b4948e107491443ed7da96f46e0eb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
// Copyright 2025 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

//go:build linux

// Package kcov provides Go native code for collecting kernel coverage (KCOV)
// information.
package kcov

import (
	"os"
	"runtime"
	"sync/atomic"
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	// Path to the KCOV control file exposed via debugfs; opening it and
	// issuing the setup ioctls requires CONFIG_KCOV and a mounted debugfs.
	kcovPath = "/sys/kernel/debug/kcov"
	// Size of the shared coverage buffer, counted in uintptr-sized slots
	// (slot 0 holds the PC counter, the rest hold collected PCs).
	// This is the same value used by the linux executor, see executor_linux.h.
	kcovCoverSize = 512 << 10
)

// KCOVState holds the resources for a single traced thread: the open kcov
// control file and the coverage buffer mmap'ed from it, which is shared
// between kernel- and user-space.
type KCOVState struct {
	file  *os.File // open handle to /sys/kernel/debug/kcov
	cover []byte   // shared buffer; slot 0 is the PC count, slots 1.. are PCs
}

// KCOVTraceResult carries the outcome of a single KCOVState.Trace call:
// the error returned by the traced function and the kernel program counters
// hit while it ran.
type KCOVTraceResult struct {
	Result   error     // Result of the call.
	Coverage []uintptr // Collected program counters.
}

// Trace invokes `f` and returns a KCOVTraceResult holding f's error and a
// copy of the program counters collected by KCOV while f ran. The caller
// must be on the goroutine/thread for which tracing was enabled.
func (st *KCOVState) Trace(f func() error) KCOVTraceResult {
	// First 8 bytes holds the number of collected PCs since last poll.
	countPtr := (*uintptr)(unsafe.Pointer(&st.cover[0]))
	// Reset coverage for this run.
	atomic.StoreUintptr(countPtr, 0)
	// Trigger call.
	err := f()
	// Load the number of PCs that were hit during trigger.
	n := atomic.LoadUintptr(countPtr)

	// Defensively clamp n to the number of PC slots actually mapped (the
	// first slot is the counter, not a PC). The kernel caps the counter at
	// the buffer size, but reading past the mmap'ed region via unsafe.Slice
	// would fault, so never trust the shared value blindly.
	if maxPCs := uintptr(len(st.cover))/sizeofUintPtr - 1; n > maxPCs {
		n = maxPCs
	}

	pcDataPtr := (*uintptr)(unsafe.Pointer(&st.cover[sizeofUintPtr]))
	pcs := unsafe.Slice(pcDataPtr, n)
	// Copy out of the shared mapping so the result stays valid after the
	// buffer is reused or unmapped.
	pcsCopy := make([]uintptr, n)
	copy(pcsCopy, pcs)
	return KCOVTraceResult{Result: err, Coverage: pcsCopy}
}

// EnableTracingForCurrentGoroutine prepares the current goroutine for kcov tracing.
// It must be paired with a call to DisableTracing.
func EnableTracingForCurrentGoroutine() (st *KCOVState, err error) {
	st = &KCOVState{}
	defer func() {
		if err != nil {
			// The original error is more important, so we ignore any potential
			// errors that result from cleaning up.
			_ = st.DisableTracing()
		}
	}()

	// KCOV is per-thread, so lock goroutine to its current OS thread.
	runtime.LockOSThread()

	file, err := os.OpenFile(kcovPath, os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	st.file = file

	// Setup trace mode and size.
	if err := unix.IoctlSetInt(int(st.file.Fd()), uint(kcovInitTrace), kcovCoverSize); err != nil {
		return nil, err
	}

	// Mmap buffer shared between kernel- and user-space. For more information,
	// see the Linux KCOV documentation: https://docs.kernel.org/dev-tools/kcov.html.
	st.cover, err = unix.Mmap(
		int(st.file.Fd()),
		0, // Offset.
		kcovCoverSize*sizeofUintPtr,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_SHARED,
	)
	if err != nil {
		return nil, err
	}

	// Enable coverage collection on the current thread.
	if err := unix.IoctlSetInt(int(st.file.Fd()), uint(kcovEnable), kcovTracePC); err != nil {
		return nil, err
	}
	return st, nil
}

// DisableTracing disables KCOV tracing for the current Go routine. On failure,
// it returns the first error that occurred during cleanup.
func (st *KCOVState) DisableTracing() error {
	var firstErr error
	if err := unix.IoctlSetInt(int(st.file.Fd()), uint(kcovDisable), kcovTracePC); err != nil {
		firstErr = err
	}
	if err := unix.Munmap(st.cover); err != nil && firstErr == nil {
		firstErr = err
	}
	if err := st.file.Close(); err != nil && firstErr == nil {
		firstErr = err
	}
	runtime.UnlockOSThread()
	return firstErr
}