diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2024-06-28 16:33:04 +0200 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2024-07-01 13:48:43 +0000 |
| commit | a6f99ace4014896f81a2f101416fd5413579f2bd (patch) | |
| tree | c6ace6c5a8736261fd462e83e19bbb88bd1a2ee3 /pkg/flatrpc | |
| parent | 1f0ee43044bc8fc00bc1eccc85a93bf2b9972dd1 (diff) | |
pkg/rpcserver: move kernel test/data range checks from executor
We see some errors of the form:
SYZFAIL: coverage filter is full
pc=0x80007000c0008 regions=[0xffffffffbfffffff 0x243fffffff 0x143fffffff 0xc3fffffff] alloc=156
The executor shouldn't send non-kernel addresses in signal,
but somehow it does. It can happen if the VM memory is corrupted,
or if the test program does something very nasty (e.g. discovers
the output region and writes to it).
It's not possible to reliably filter the signal in the tested VM.
Move all of the filtering logic to the host.
Fixes #4942
Diffstat (limited to 'pkg/flatrpc')
| -rw-r--r-- | pkg/flatrpc/flatrpc.fbs | 2 | ||||
| -rw-r--r-- | pkg/flatrpc/flatrpc.go | 92 | ||||
| -rw-r--r-- | pkg/flatrpc/flatrpc.h | 50 |
3 files changed, 107 insertions, 37 deletions
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs index 58a6b3250..f0b03c4a9 100644 --- a/pkg/flatrpc/flatrpc.fbs +++ b/pkg/flatrpc/flatrpc.fbs @@ -37,6 +37,8 @@ table ConnectRequestRaw { table ConnectReplyRaw { debug :bool; cover :bool; + cover_edges :bool; + kernel_64_bit :bool; procs :int32; slowdown :int32; syscall_timeout_ms :int32; diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go index 0e46dddbd..87f6ad19d 100644 --- a/pkg/flatrpc/flatrpc.go +++ b/pkg/flatrpc/flatrpc.go @@ -509,6 +509,8 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { type ConnectReplyRawT struct { Debug bool `json:"debug"` Cover bool `json:"cover"` + CoverEdges bool `json:"cover_edges"` + Kernel64Bit bool `json:"kernel_64_bit"` Procs int32 `json:"procs"` Slowdown int32 `json:"slowdown"` SyscallTimeoutMs int32 `json:"syscall_timeout_ms"` @@ -579,6 +581,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse ConnectReplyRawStart(builder) ConnectReplyRawAddDebug(builder, t.Debug) ConnectReplyRawAddCover(builder, t.Cover) + ConnectReplyRawAddCoverEdges(builder, t.CoverEdges) + ConnectReplyRawAddKernel64Bit(builder, t.Kernel64Bit) ConnectReplyRawAddProcs(builder, t.Procs) ConnectReplyRawAddSlowdown(builder, t.Slowdown) ConnectReplyRawAddSyscallTimeoutMs(builder, t.SyscallTimeoutMs) @@ -594,6 +598,8 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) { t.Debug = rcv.Debug() t.Cover = rcv.Cover() + t.CoverEdges = rcv.CoverEdges() + t.Kernel64Bit = rcv.Kernel64Bit() t.Procs = rcv.Procs() t.Slowdown = rcv.Slowdown() t.SyscallTimeoutMs = rcv.SyscallTimeoutMs() @@ -681,20 +687,44 @@ func (rcv *ConnectReplyRaw) MutateCover(n bool) bool { return rcv._tab.MutateBoolSlot(6, n) } -func (rcv *ConnectReplyRaw) Procs() int32 { +func (rcv *ConnectReplyRaw) CoverEdges() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { + 
return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *ConnectReplyRaw) MutateCoverEdges(n bool) bool { + return rcv._tab.MutateBoolSlot(8, n) +} + +func (rcv *ConnectReplyRaw) Kernel64Bit() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *ConnectReplyRaw) MutateKernel64Bit(n bool) bool { + return rcv._tab.MutateBoolSlot(10, n) +} + +func (rcv *ConnectReplyRaw) Procs() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool { - return rcv._tab.MutateInt32Slot(8, n) + return rcv._tab.MutateInt32Slot(12, n) } func (rcv *ConnectReplyRaw) Slowdown() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -702,11 +732,11 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 { } func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool { - return rcv._tab.MutateInt32Slot(10, n) + return rcv._tab.MutateInt32Slot(14, n) } func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -714,11 +744,11 @@ func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 { } func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool { - return rcv._tab.MutateInt32Slot(12, n) + return rcv._tab.MutateInt32Slot(16, n) } func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -726,11 +756,11 @@ func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 { } func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool { - return 
rcv._tab.MutateInt32Slot(14, n) + return rcv._tab.MutateInt32Slot(18, n) } func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -739,7 +769,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte { } func (rcv *ConnectReplyRaw) LeakFramesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -747,7 +777,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int { } func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -756,7 +786,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte { } func (rcv *ConnectReplyRaw) RaceFramesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -764,7 +794,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int { } func (rcv *ConnectReplyRaw) Features() Feature { - o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos)) } @@ -772,11 +802,11 @@ func (rcv *ConnectReplyRaw) Features() Feature { } func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool { - return rcv._tab.MutateUint64Slot(20, uint64(n)) + return rcv._tab.MutateUint64Slot(24, uint64(n)) } func (rcv *ConnectReplyRaw) Files(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -785,7 +815,7 @@ func (rcv 
*ConnectReplyRaw) Files(j int) []byte { } func (rcv *ConnectReplyRaw) FilesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -793,7 +823,7 @@ func (rcv *ConnectReplyRaw) FilesLength() int { } func (rcv *ConnectReplyRaw) Globs(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -802,7 +832,7 @@ func (rcv *ConnectReplyRaw) Globs(j int) []byte { } func (rcv *ConnectReplyRaw) GlobsLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -810,7 +840,7 @@ func (rcv *ConnectReplyRaw) GlobsLength() int { } func ConnectReplyRawStart(builder *flatbuffers.Builder) { - builder.StartObject(11) + builder.StartObject(13) } func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) { builder.PrependBoolSlot(0, debug, false) @@ -818,41 +848,47 @@ func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) { func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) { builder.PrependBoolSlot(1, cover, false) } +func ConnectReplyRawAddCoverEdges(builder *flatbuffers.Builder, coverEdges bool) { + builder.PrependBoolSlot(2, coverEdges, false) +} +func ConnectReplyRawAddKernel64Bit(builder *flatbuffers.Builder, kernel64Bit bool) { + builder.PrependBoolSlot(3, kernel64Bit, false) +} func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) { - builder.PrependInt32Slot(2, procs, 0) + builder.PrependInt32Slot(4, procs, 0) } func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) { - builder.PrependInt32Slot(3, slowdown, 0) + builder.PrependInt32Slot(5, slowdown, 0) } func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs 
int32) { - builder.PrependInt32Slot(4, syscallTimeoutMs, 0) + builder.PrependInt32Slot(6, syscallTimeoutMs, 0) } func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) { - builder.PrependInt32Slot(5, programTimeoutMs, 0) + builder.PrependInt32Slot(7, programTimeoutMs, 0) } func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(leakFrames), 0) + builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(leakFrames), 0) } func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(raceFrames), 0) + builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(raceFrames), 0) } func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) { - builder.PrependUint64Slot(8, uint64(features), 0) + builder.PrependUint64Slot(10, uint64(features), 0) } func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(files), 0) + builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(files), 0) } func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(globs), 0) + builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(globs), 0) } func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return 
builder.StartVector(4, numElems, 4) diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h index e3c2de7af..a2704f9e9 100644 --- a/pkg/flatrpc/flatrpc.h +++ b/pkg/flatrpc/flatrpc.h @@ -807,6 +807,8 @@ struct ConnectReplyRawT : public flatbuffers::NativeTable { typedef ConnectReplyRaw TableType; bool debug = false; bool cover = false; + bool cover_edges = false; + bool kernel_64_bit = false; int32_t procs = 0; int32_t slowdown = 0; int32_t syscall_timeout_ms = 0; @@ -824,15 +826,17 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_DEBUG = 4, VT_COVER = 6, - VT_PROCS = 8, - VT_SLOWDOWN = 10, - VT_SYSCALL_TIMEOUT_MS = 12, - VT_PROGRAM_TIMEOUT_MS = 14, - VT_LEAK_FRAMES = 16, - VT_RACE_FRAMES = 18, - VT_FEATURES = 20, - VT_FILES = 22, - VT_GLOBS = 24 + VT_COVER_EDGES = 8, + VT_KERNEL_64_BIT = 10, + VT_PROCS = 12, + VT_SLOWDOWN = 14, + VT_SYSCALL_TIMEOUT_MS = 16, + VT_PROGRAM_TIMEOUT_MS = 18, + VT_LEAK_FRAMES = 20, + VT_RACE_FRAMES = 22, + VT_FEATURES = 24, + VT_FILES = 26, + VT_GLOBS = 28 }; bool debug() const { return GetField<uint8_t>(VT_DEBUG, 0) != 0; @@ -840,6 +844,12 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { bool cover() const { return GetField<uint8_t>(VT_COVER, 0) != 0; } + bool cover_edges() const { + return GetField<uint8_t>(VT_COVER_EDGES, 0) != 0; + } + bool kernel_64_bit() const { + return GetField<uint8_t>(VT_KERNEL_64_BIT, 0) != 0; + } int32_t procs() const { return GetField<int32_t>(VT_PROCS, 0); } @@ -871,6 +881,8 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_DEBUG, 1) && VerifyField<uint8_t>(verifier, VT_COVER, 1) && + VerifyField<uint8_t>(verifier, VT_COVER_EDGES, 1) && + VerifyField<uint8_t>(verifier, VT_KERNEL_64_BIT, 1) && VerifyField<int32_t>(verifier, VT_PROCS, 4) && VerifyField<int32_t>(verifier, 
VT_SLOWDOWN, 4) && VerifyField<int32_t>(verifier, VT_SYSCALL_TIMEOUT_MS, 4) && @@ -905,6 +917,12 @@ struct ConnectReplyRawBuilder { void add_cover(bool cover) { fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER, static_cast<uint8_t>(cover), 0); } + void add_cover_edges(bool cover_edges) { + fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_COVER_EDGES, static_cast<uint8_t>(cover_edges), 0); + } + void add_kernel_64_bit(bool kernel_64_bit) { + fbb_.AddElement<uint8_t>(ConnectReplyRaw::VT_KERNEL_64_BIT, static_cast<uint8_t>(kernel_64_bit), 0); + } void add_procs(int32_t procs) { fbb_.AddElement<int32_t>(ConnectReplyRaw::VT_PROCS, procs, 0); } @@ -947,6 +965,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw( flatbuffers::FlatBufferBuilder &_fbb, bool debug = false, bool cover = false, + bool cover_edges = false, + bool kernel_64_bit = false, int32_t procs = 0, int32_t slowdown = 0, int32_t syscall_timeout_ms = 0, @@ -966,6 +986,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw( builder_.add_syscall_timeout_ms(syscall_timeout_ms); builder_.add_slowdown(slowdown); builder_.add_procs(procs); + builder_.add_kernel_64_bit(kernel_64_bit); + builder_.add_cover_edges(cover_edges); builder_.add_cover(cover); builder_.add_debug(debug); return builder_.Finish(); @@ -975,6 +997,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect( flatbuffers::FlatBufferBuilder &_fbb, bool debug = false, bool cover = false, + bool cover_edges = false, + bool kernel_64_bit = false, int32_t procs = 0, int32_t slowdown = 0, int32_t syscall_timeout_ms = 0, @@ -992,6 +1016,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect( _fbb, debug, cover, + cover_edges, + kernel_64_bit, procs, slowdown, syscall_timeout_ms, @@ -2446,6 +2472,8 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r (void)_resolver; { auto _e = debug(); _o->debug = _e; } { auto _e = cover(); _o->cover = _e; } + { auto 
_e = cover_edges(); _o->cover_edges = _e; } + { auto _e = kernel_64_bit(); _o->kernel_64_bit = _e; } { auto _e = procs(); _o->procs = _e; } { auto _e = slowdown(); _o->slowdown = _e; } { auto _e = syscall_timeout_ms(); _o->syscall_timeout_ms = _e; } @@ -2467,6 +2495,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _debug = _o->debug; auto _cover = _o->cover; + auto _cover_edges = _o->cover_edges; + auto _kernel_64_bit = _o->kernel_64_bit; auto _procs = _o->procs; auto _slowdown = _o->slowdown; auto _syscall_timeout_ms = _o->syscall_timeout_ms; @@ -2480,6 +2510,8 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F _fbb, _debug, _cover, + _cover_edges, + _kernel_64_bit, _procs, _slowdown, _syscall_timeout_ms, |
