aboutsummaryrefslogtreecommitdiffstats
path: root/prog/kfuzztest.go
blob: dacd54885c83ba332c364b535aaed93a2977d5f5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
// Copyright 2025 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package prog

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	// Region ID written into a relocation entry whose pointer is NULL.
	kFuzzTestRegionIDNull uint32 = ^uint32(0)
	// Size of the redzone poisoned between/around regions in the payload.
	kFuzzTestPoisonSize   uint64 = 0x8

	// Input-format version and magic value packed into the 8-byte prefix.
	kFuzzTestVersion    uint32 = 0
	kFuzzTestMagic      uint32 = 0xBFACE
	kFuzzTestPrefixSize        = 8

	// Minimum region alignment required by KFuzzTest. This is exposed by the
	// /sys/kernel/debug/kfuzztest/_config/minalign debugfs file. This value
	// always equals MAX(ARCH_KMALLOC_MINALIGN, KFUZZTEST_POISON_SIZE) = 8 on
	// x86_64, so we hardcode it for now. A more robust solution would involve
	// reading this from the debugfs entry at boot before fuzzing begins.
	kFuzzTestMinalign uint64 = 8

	// Maximum input size accepted by the KFuzzTest kernel module.
	KFuzzTestMaxInputSize uint64 = 64 << 10
)

func kFuzzTestWritePrefix(buf *bytes.Buffer) {
	prefix := (uint64(kFuzzTestVersion) << 32) | uint64(kFuzzTestMagic)
	binary.Write(buf, binary.LittleEndian, prefix)
}

// isPowerOfTwo reports whether n is a positive power of two.
func isPowerOfTwo(n uint64) bool {
	if n == 0 {
		return false
	}
	// A power of two has exactly one bit set, so clearing the lowest set
	// bit must leave zero.
	return n&(n-1) == 0
}

// roundUpPowerOfTwo rounds x up to the next multiple of n. n must be a
// power of two; the function panics otherwise.
func roundUpPowerOfTwo(x, n uint64) uint64 {
	// Inline power-of-two check: exactly one bit may be set.
	if n == 0 || n&(n-1) != 0 {
		panic("n was not a power of 2")
	}
	mask := n - 1
	return (x + mask) &^ mask
}

// Pad b so that its length is a multiple of alignment, with at least
// minPadding bytes of padding, where alignment is a power of 2. When
// alignment is 0, exactly minPadding zero bytes are appended.
func padWithAlignment(b *bytes.Buffer, alignment, minPadding uint64) {
	oldSize := uint64(b.Len())
	newSize := oldSize + minPadding
	if alignment != 0 {
		// Panics (via roundUpPowerOfTwo) if alignment is not a power of 2.
		newSize = roundUpPowerOfTwo(newSize, alignment)
	}
	// Append all of the zero padding in a single write instead of one
	// WriteByte call per byte.
	b.Write(make([]byte, newSize-oldSize))
}

// sliceQueue is a minimal FIFO queue backed by a slice.
type sliceQueue[T any] struct {
	q []T
}

// newSliceQueue returns an empty queue ready for use; the zero-valued nil
// slice serves as the initial backing store.
func newSliceQueue[T any]() *sliceQueue[T] {
	return &sliceQueue[T]{}
}

// push appends elem to the back of the queue.
func (sq *sliceQueue[T]) push(elem T) {
	sq.q = append(sq.q, elem)
}

// pop removes and returns the element at the front of the queue. It panics
// if the queue is empty.
func (sq *sliceQueue[T]) pop() T {
	front := sq.q[0]
	sq.q = sq.q[1:]
	return front
}

// isEmpty reports whether the queue currently holds no elements.
func (sq *sliceQueue[T]) isEmpty() bool {
	return len(sq.q) == 0
}

// kFuzzTestRelocation records one pointer field that the kernel must patch
// after allocating regions: the pointer lives at byte offset `offset` inside
// srcRegion and should point at dstRegion (nil dstRegion encodes a NULL
// pointer; see kFuzzTestWriteReloc).
type kFuzzTestRelocation struct {
	offset    uint32
	srcRegion Arg
	dstRegion Arg
}

// kFuzzTestRegion describes one logical heap allocation within the payload:
// its byte offset from the start of the payload and its size in bytes.
type kFuzzTestRegion struct {
	offset uint32
	size   uint32
}

// The following helpers and definitions follow directly from the C-struct
// definitions in <include/linux/kfuzztest.h>.

// Encoded size in bytes of a single region entry (two uint32 fields).
const kFuzzTestRegionSize = 8

// kFuzzTestRegionArraySize returns the encoded size of a region array: a
// 4-byte region count followed by one fixed-size entry per region.
func kFuzzTestRegionArraySize(numRegions int) int {
	return numRegions*kFuzzTestRegionSize + 4
}

// kFuzzTestWriteRegion appends one encoded region entry to buf: the region's
// offset followed by its size, both as little-endian uint32s.
func kFuzzTestWriteRegion(buf *bytes.Buffer, region kFuzzTestRegion) {
	var entry [kFuzzTestRegionSize]byte
	binary.LittleEndian.PutUint32(entry[0:4], region.offset)
	binary.LittleEndian.PutUint32(entry[4:8], region.size)
	buf.Write(entry[:])
}

// kFuzzTestWriteRegionArray appends the full region array to buf: a
// little-endian uint32 count, then each region entry in order.
func kFuzzTestWriteRegionArray(buf *bytes.Buffer, regions []kFuzzTestRegion) {
	count := uint32(len(regions))
	binary.Write(buf, binary.LittleEndian, count)
	for i := range regions {
		kFuzzTestWriteRegion(buf, regions[i])
	}
}

// Encoded size in bytes of a single relocation entry (three uint32 fields).
const kFuzzTestRelocationSize = 12

// kFuzzTestRelocTableSize returns the encoded size of a relocation table:
// an 8-byte header (entry count + padding size) followed by one fixed-size
// entry per relocation.
func kFuzzTestRelocTableSize(numRelocs int) int {
	return numRelocs*kFuzzTestRelocationSize + 8
}

// kFuzzTestWriteReloc appends one encoded relocation entry to buf: the
// source region's ID, the pointer's offset within that region, and the
// destination region's ID (kFuzzTestRegionIDNull for a NULL pointer), all
// as little-endian uint32s.
//
// NOTE(review): Go maps are reference types, so the *map[Arg]int pointer is
// unnecessary; kept as-is to preserve the signature for existing callers.
func kFuzzTestWriteReloc(buf *bytes.Buffer, regToID *map[Arg]int, reloc kFuzzTestRelocation) {
	dstID := kFuzzTestRegionIDNull
	if reloc.dstRegion != nil {
		dstID = uint32((*regToID)[reloc.dstRegion])
	}
	binary.Write(buf, binary.LittleEndian, uint32((*regToID)[reloc.srcRegion]))
	binary.Write(buf, binary.LittleEndian, reloc.offset)
	binary.Write(buf, binary.LittleEndian, dstID)
}

// kFuzzTestWriteRelocTable appends the full relocation table to buf: the
// entry count and padding size (both little-endian uint32s), every
// relocation entry in order, and finally paddingBytes zero bytes so that
// the payload that follows starts at its required alignment.
func kFuzzTestWriteRelocTable(buf *bytes.Buffer, regToID *map[Arg]int,
	relocations []kFuzzTestRelocation, paddingBytes uint64) {
	numEntries := uint32(len(relocations))
	binary.Write(buf, binary.LittleEndian, numEntries)
	binary.Write(buf, binary.LittleEndian, uint32(paddingBytes))
	for i := range relocations {
		kFuzzTestWriteReloc(buf, regToID, relocations[i])
	}
	// Trailing zero padding in one shot.
	buf.Write(make([]byte, paddingBytes))
}

// Placeholder written into the payload wherever a pointer field lives; the
// kernel overwrites it at deserialization time using the relocation table.
const kFuzzTestPlaceHolderPtr uint64 = 0xFFFFFFFFFFFFFFFF

// Expands a region, and returns a list of relocations that need to be made.
//
// A region is one logical allocation rooted at reg. Its members are
// flattened into a single byte slice via a breadth-first walk: GroupArg
// members are enqueued rather than recursed into, pointer fields become an
// 8-byte placeholder plus a kFuzzTestRelocation entry, and scalar/data
// members are encoded in place in little-endian order. Pointees (a.Res) are
// NOT encoded here; the caller expands each dstRegion as its own region.
//
// NOTE(review): breadth-first flattening places a nested group's members
// after the outer group's remaining siblings — confirm this matches the
// layout the KFuzzTest kernel side expects for nested structs.
func kFuzzTestExpandRegion(reg Arg) ([]byte, []kFuzzTestRelocation) {
	relocations := []kFuzzTestRelocation{}
	var encoded bytes.Buffer
	queue := newSliceQueue[Arg]()
	queue.push(reg)

	for !queue.isEmpty() {
		arg := queue.pop()
		// Each member starts at a multiple of its type's alignment.
		padWithAlignment(&encoded, arg.Type().Alignment(), 0)

		switch a := arg.(type) {
		case *PointerArg:
			// Record the placeholder's offset relative to the start of this
			// region. a.Res may be nil, which later encodes a NULL pointer.
			offset := uint32(encoded.Len())
			binary.Write(&encoded, binary.LittleEndian, kFuzzTestPlaceHolderPtr)
			relocations = append(relocations, kFuzzTestRelocation{offset, reg, a.Res})
		case *GroupArg:
			// Flatten struct/array members into this same region.
			for _, inner := range a.Inner {
				queue.push(inner)
			}
		case *DataArg:
			data := a.data
			buffer, ok := a.ArgCommon.Type().(*BufferType)
			if !ok {
				panic("DataArg should be a BufferType")
			}
			// Unlike length fields whose incorrectness can be prevented easily,
			// it is an invasive change to prevent generation of
			// non-null-terminated strings. Forcibly null-terminating them
			// during encoding allows us to centralize this easily and prevent
			// false positive buffer overflows in KFuzzTest targets.
			if buffer.Kind == BufferString && (len(data) == 0 || data[len(data)-1] != byte(0)) {
				data = append(data, byte(0))
			}
			encoded.Write(data)
		case *ConstArg:
			// Fixed-width little-endian encoding, selected by the declared
			// size of the constant.
			val, _ := a.Value()
			switch a.Size() {
			case 1:
				binary.Write(&encoded, binary.LittleEndian, uint8(val))
			case 2:
				binary.Write(&encoded, binary.LittleEndian, uint16(val))
			case 4:
				binary.Write(&encoded, binary.LittleEndian, uint32(val))
			case 8:
				binary.Write(&encoded, binary.LittleEndian, val)
			default:
				panic(fmt.Sprintf("unsupported constant size: %d", a.Size()))
			}
			// TODO: handle union args.
		default:
			panic(fmt.Sprintf("tried to serialize unsupported type: %s", a.Type().Name()))
		}
	}

	return encoded.Bytes(), relocations
}

// MarshallKFuzzTestArg serializes a syzkaller Arg into a flat binary format
// understood by the KFuzzTest kernel interface (see `include/linux/kfuzztest.h`).
//
// The goal is to represent a tree-like structure of arguments (which may contain
// pointers and cycles) as a single byte slice that the kernel can deserialize
// into a set of distinct heap allocations.
//
// The binary format consists of three contiguous parts, in this order:
//
//  1. Region Array: A header describing all logical memory regions that will be
//     allocated by the kernel. Each `relocRegion` defines a region's unique `id`,
//     its `size`, its `alignment`, and its `start` offset within the payload.
//     The kernel uses this table to create one distinct heap allocation per region.
//
//  2. Relocation Table: A header containing a list of `relocationEntry` structs.
//     Each entry identifies the location of a pointer field within the payload
//     (via a `regionID` and `regionOffset`) and maps it to the logical region
//     it points to (via a `value` which holds the pointee's `regionID`).
//     A NULL pointer is identified by the special value `kFuzzTestNilPtrVal`.
//
//  3. Payload: The raw, serialized data for all arguments, laid out as a single
//     contiguous block of memory with padded regions as per the KFuzzTest input
//     format's specification defined in `Documentation/dev-tools/kfuzztest.rst`.
//
// Cycles are handled by tracking visited arguments, ensuring that a region is
// only visited and encoded once.
//
// For a concrete example of the final binary layout, see the test cases for this
// function in `prog/kfuzztest_test.go`.
func MarshallKFuzztestArg(topLevel Arg) []byte {
	regions := []kFuzzTestRegion{}
	allRelocations := []kFuzzTestRelocation{}
	visitedRegions := make(map[Arg]int)
	queue := newSliceQueue[Arg]()
	var payload bytes.Buffer
	queue.push(topLevel)
	maxAlignment := uint64(8)

	if topLevel == nil {
		return []byte{}
	}

Loop:
	for {
		if queue.isEmpty() {
			break Loop
		}

		reg := queue.pop()
		if _, visited := visitedRegions[reg]; visited {
			continue Loop
		}

		alignment := max(kFuzzTestMinalign, reg.Type().Alignment())
		maxAlignment = max(maxAlignment, alignment)

		regionData, relocations := kFuzzTestExpandRegion(reg)
		for _, reloc := range relocations {
			if reloc.dstRegion == nil {
				continue
			}
			if _, visited := visitedRegions[reloc.dstRegion]; !visited {
				queue.push(reloc.dstRegion)
			}
		}
		allRelocations = append(allRelocations, relocations...)

		padWithAlignment(&payload, alignment, 0)
		regions = append(regions, kFuzzTestRegion{
			offset: uint32(payload.Len()),
			size:   uint32(len(regionData))},
		)
		visitedRegions[reg] = len(regions) - 1
		payload.Write(regionData)
		// The end of the payload should have at least kFuzzTestPoisonSize bytes
		// of padding, and be aligned to kFuzzTestPoisonSize.
		padWithAlignment(&payload, kFuzzTestPoisonSize, kFuzzTestPoisonSize)
	}

	headerLen := 0x8 // Two integer values - the magic value, and the version number.
	regionArrayLen := kFuzzTestRegionArraySize(len(regions))
	relocTableLen := kFuzzTestRelocTableSize(len(allRelocations))
	metadataLen := headerLen + regionArrayLen + relocTableLen

	// The payload needs to be aligned to max alignment to ensure that all
	// nested structs are properly aligned, and there should be enough padding
	// so that the region before the payload can be poisoned with a redzone.
	paddingBytes := roundUpPowerOfTwo(uint64(metadataLen)+kFuzzTestPoisonSize, maxAlignment) - uint64(metadataLen)

	var out bytes.Buffer
	kFuzzTestWritePrefix(&out)
	kFuzzTestWriteRegionArray(&out, regions)
	kFuzzTestWriteRelocTable(&out, &visitedRegions, allRelocations, paddingBytes)
	out.Write(payload.Bytes())
	return out.Bytes()
}