diff options
| author | Dmitry Vyukov <dvyukov@google.com> | 2024-06-04 12:55:40 +0200 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2024-06-24 09:57:34 +0000 |
| commit | 90d67044dab68568e8f35bc14b68055dbd166eff (patch) | |
| tree | d460a83eafdb63342da8ec992f2d78dbff5403a0 /pkg/flatrpc | |
| parent | 6ac2b94a701ded60e4782530bd9f209513acb324 (diff) | |
executor: refactor coverage filter
Diffstat (limited to 'pkg/flatrpc')
| -rw-r--r-- | pkg/flatrpc/flatrpc.fbs | 3 | ||||
| -rw-r--r-- | pkg/flatrpc/flatrpc.go | 36 | ||||
| -rw-r--r-- | pkg/flatrpc/flatrpc.h | 59 |
3 files changed, 55 insertions(+), 43 deletions(-)
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs index 7800d6779..78adc8ec5 100644 --- a/pkg/flatrpc/flatrpc.fbs +++ b/pkg/flatrpc/flatrpc.fbs @@ -54,7 +54,7 @@ table InfoRequestRaw { } table InfoReplyRaw { - cover_filter :[uint8]; + cover_filter :[uint64]; } table FileInfoRaw { @@ -134,7 +134,6 @@ enum ExecFlag : uint64 (bit_flags) { DedupCover, // deduplicate coverage in executor CollectComps, // collect KCOV comparisons Threaded, // use multiple threads to mitigate blocked syscalls - CoverFilter, // setup and use bitmap to do coverage filter } struct ExecOptsRaw { diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go index 9fbd2d0ae..b561334fe 100644 --- a/pkg/flatrpc/flatrpc.go +++ b/pkg/flatrpc/flatrpc.go @@ -312,7 +312,6 @@ const ( ExecFlagDedupCover ExecFlag = 4 ExecFlagCollectComps ExecFlag = 8 ExecFlagThreaded ExecFlag = 16 - ExecFlagCoverFilter ExecFlag = 32 ) var EnumNamesExecFlag = map[ExecFlag]string{ @@ -321,7 +320,6 @@ var EnumNamesExecFlag = map[ExecFlag]string{ ExecFlagDedupCover: "DedupCover", ExecFlagCollectComps: "CollectComps", ExecFlagThreaded: "Threaded", - ExecFlagCoverFilter: "CoverFilter", } var EnumValuesExecFlag = map[string]ExecFlag{ @@ -330,7 +328,6 @@ var EnumValuesExecFlag = map[string]ExecFlag{ "DedupCover": ExecFlagDedupCover, "CollectComps": ExecFlagCollectComps, "Threaded": ExecFlagThreaded, - "CoverFilter": ExecFlagCoverFilter, } func (v ExecFlag) String() string { @@ -1005,7 +1002,7 @@ func InfoRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { } type InfoReplyRawT struct { - CoverFilter []byte `json:"cover_filter"` + CoverFilter []uint64 `json:"cover_filter"` } func (t *InfoReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT { @@ -1014,7 +1011,12 @@ func (t *InfoReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT } coverFilterOffset := flatbuffers.UOffsetT(0) if t.CoverFilter != nil { - coverFilterOffset = builder.CreateByteString(t.CoverFilter) + 
coverFilterLength := len(t.CoverFilter) + InfoReplyRawStartCoverFilterVector(builder, coverFilterLength) + for j := coverFilterLength - 1; j >= 0; j-- { + builder.PrependUint64(t.CoverFilter[j]) + } + coverFilterOffset = builder.EndVector(coverFilterLength) } InfoReplyRawStart(builder) InfoReplyRawAddCoverFilter(builder, coverFilterOffset) @@ -1022,7 +1024,11 @@ func (t *InfoReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT } func (rcv *InfoReplyRaw) UnPackTo(t *InfoReplyRawT) { - t.CoverFilter = rcv.CoverFilterBytes() + coverFilterLength := rcv.CoverFilterLength() + t.CoverFilter = make([]uint64, coverFilterLength) + for j := 0; j < coverFilterLength; j++ { + t.CoverFilter[j] = rcv.CoverFilter(j) + } } func (rcv *InfoReplyRaw) UnPack() *InfoReplyRawT { @@ -1061,11 +1067,11 @@ func (rcv *InfoReplyRaw) Table() flatbuffers.Table { return rcv._tab } -func (rcv *InfoReplyRaw) CoverFilter(j int) byte { +func (rcv *InfoReplyRaw) CoverFilter(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -1078,19 +1084,11 @@ func (rcv *InfoReplyRaw) CoverFilterLength() int { return 0 } -func (rcv *InfoReplyRaw) CoverFilterBytes() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *InfoReplyRaw) MutateCoverFilter(j int, n byte) bool { +func (rcv *InfoReplyRaw) MutateCoverFilter(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } @@ -1102,7 +1100,7 @@ func InfoReplyRawAddCoverFilter(builder *flatbuffers.Builder, coverFilter flatbu builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(coverFilter), 0) } func 
InfoReplyRawStartCoverFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(1, numElems, 1) + return builder.StartVector(8, numElems, 8) } func InfoReplyRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h index aa02046b1..d430e48f2 100644 --- a/pkg/flatrpc/flatrpc.h +++ b/pkg/flatrpc/flatrpc.h @@ -509,34 +509,49 @@ enum class ExecFlag : uint64_t { DedupCover = 4ULL, CollectComps = 8ULL, Threaded = 16ULL, - CoverFilter = 32ULL, NONE = 0, - ANY = 63ULL + ANY = 31ULL }; FLATBUFFERS_DEFINE_BITMASK_OPERATORS(ExecFlag, uint64_t) -inline const ExecFlag (&EnumValuesExecFlag())[6] { +inline const ExecFlag (&EnumValuesExecFlag())[5] { static const ExecFlag values[] = { ExecFlag::CollectSignal, ExecFlag::CollectCover, ExecFlag::DedupCover, ExecFlag::CollectComps, - ExecFlag::Threaded, - ExecFlag::CoverFilter + ExecFlag::Threaded }; return values; } +inline const char * const *EnumNamesExecFlag() { + static const char * const names[17] = { + "CollectSignal", + "CollectCover", + "", + "DedupCover", + "", + "", + "", + "CollectComps", + "", + "", + "", + "", + "", + "", + "", + "Threaded", + nullptr + }; + return names; +} + inline const char *EnumNameExecFlag(ExecFlag e) { - switch (e) { - case ExecFlag::CollectSignal: return "CollectSignal"; - case ExecFlag::CollectCover: return "CollectCover"; - case ExecFlag::DedupCover: return "DedupCover"; - case ExecFlag::CollectComps: return "CollectComps"; - case ExecFlag::Threaded: return "Threaded"; - case ExecFlag::CoverFilter: return "CoverFilter"; - default: return ""; - } + if (flatbuffers::IsOutRange(e, ExecFlag::CollectSignal, ExecFlag::Threaded)) return ""; + const size_t index = static_cast<size_t>(e) - static_cast<size_t>(ExecFlag::CollectSignal); + return EnumNamesExecFlag()[index]; } enum class CallFlag : uint8_t { @@ -1018,7 +1033,7 @@ flatbuffers::Offset<InfoRequestRaw> 
CreateInfoRequestRaw(flatbuffers::FlatBuffer struct InfoReplyRawT : public flatbuffers::NativeTable { typedef InfoReplyRaw TableType; - std::vector<uint8_t> cover_filter{}; + std::vector<uint64_t> cover_filter{}; }; struct InfoReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { @@ -1027,8 +1042,8 @@ struct InfoReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_COVER_FILTER = 4 }; - const flatbuffers::Vector<uint8_t> *cover_filter() const { - return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_COVER_FILTER); + const flatbuffers::Vector<uint64_t> *cover_filter() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_COVER_FILTER); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && @@ -1045,7 +1060,7 @@ struct InfoReplyRawBuilder { typedef InfoReplyRaw Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_cover_filter(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> cover_filter) { + void add_cover_filter(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> cover_filter) { fbb_.AddOffset(InfoReplyRaw::VT_COVER_FILTER, cover_filter); } explicit InfoReplyRawBuilder(flatbuffers::FlatBufferBuilder &_fbb) @@ -1061,7 +1076,7 @@ struct InfoReplyRawBuilder { inline flatbuffers::Offset<InfoReplyRaw> CreateInfoReplyRaw( flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset<flatbuffers::Vector<uint8_t>> cover_filter = 0) { + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> cover_filter = 0) { InfoReplyRawBuilder builder_(_fbb); builder_.add_cover_filter(cover_filter); return builder_.Finish(); @@ -1069,8 +1084,8 @@ inline flatbuffers::Offset<InfoReplyRaw> CreateInfoReplyRaw( inline flatbuffers::Offset<InfoReplyRaw> CreateInfoReplyRawDirect( flatbuffers::FlatBufferBuilder &_fbb, - const std::vector<uint8_t> *cover_filter = nullptr) { - auto cover_filter__ = cover_filter ? 
_fbb.CreateVector<uint8_t>(*cover_filter) : 0; + const std::vector<uint64_t> *cover_filter = nullptr) { + auto cover_filter__ = cover_filter ? _fbb.CreateVector<uint64_t>(*cover_filter) : 0; return rpc::CreateInfoReplyRaw( _fbb, cover_filter__); @@ -2351,7 +2366,7 @@ inline InfoReplyRawT *InfoReplyRaw::UnPack(const flatbuffers::resolver_function_ inline void InfoReplyRaw::UnPackTo(InfoReplyRawT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = cover_filter(); if (_e) { _o->cover_filter.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->cover_filter.begin()); } } + { auto _e = cover_filter(); if (_e) { _o->cover_filter.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->cover_filter[_i] = _e->Get(_i); } } } } inline flatbuffers::Offset<InfoReplyRaw> InfoReplyRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InfoReplyRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) { |
