| author | Joey Jiao <quic_jiangenj@quicinc.com> | 2024-05-14 10:51:19 +0800 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2024-05-27 09:44:25 +0000 |
| commit | b75d07e8995d9d6682851c553b23b4d3e9734436 (patch) | |
| tree | 09e80808b88d2f6f576becef41a887eb242f3ce3 /pkg | |
| parent | 339d8cf83c825a88ff6d1c6b5a73e20fffd33010 (diff) | |
all: adapt all cover and sig to 64bit
Take some arm64 devices as an example:
the KASLR offset differs at bits 12-40, and kernel modules are loaded in a
separate 2GB region. The core kernel has `ffffffd342e10000 T _stext`, i.e.
its upper 32 bits are ffffffd3, but the first module is loaded at
0xffffffd2eeb2a000 and the last at 0xffffffd2f42c4000, so the upper 32 bits
differ between the core kernel and the modules.
If we keep only the lower 32 bits of each covered PC, module addresses are
recovered incorrectly (see the sketch below).
So we need to move to 64-bit cover and signal:
- change cover/signal to 64-bit to match the syz-executor change
- remove the kernel upper-base logic, since the upper base is not a constant
  for the core kernel and modules when KASLR is enabled
- remove the unused pcBase
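A minimal standalone sketch of the failure mode described above, using the example addresses quoted in this message and mirroring the removed `RestorePC(pc, base)` helper:

```go
package main

import "fmt"

func main() {
	stext := uint64(0xffffffd342e10000)    // core kernel: `ffffffd342e10000 T _stext`
	modulePC := uint64(0xffffffd2eeb2a000) // a PC inside the first loaded module

	// Old scheme: keep only the low 32 bits of each covered PC...
	low := uint32(modulePC)
	// ...and restore the full address with one constant upper base taken
	// from the core kernel image (what the removed RestorePC helper did).
	base := uint32(stext >> 32)
	restored := uint64(base)<<32 + uint64(low)

	fmt.Printf("original PC: %#x\n", modulePC) // 0xffffffd2eeb2a000
	fmt.Printf("restored PC: %#x\n", restored) // 0xffffffd3eeb2a000 -- wrong upper half
}
```

Restoring every PC with the single core-kernel base shifts every module PC by 4GB, which is why the truncation has to go.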
Diffstat (limited to 'pkg')
| -rw-r--r-- | pkg/corpus/corpus.go | 10 |
| -rw-r--r-- | pkg/corpus/corpus_test.go | 12 |
| -rw-r--r-- | pkg/cover/backend/backend.go | 2 |
| -rw-r--r-- | pkg/cover/backend/dwarf.go | 16 |
| -rw-r--r-- | pkg/cover/backend/gvisor.go | 4 |
| -rw-r--r-- | pkg/cover/backend/pc.go | 4 |
| -rw-r--r-- | pkg/cover/canonicalizer.go | 54 |
| -rw-r--r-- | pkg/cover/canonicalizer_test.go | 62 |
| -rw-r--r-- | pkg/cover/cover.go | 10 |
| -rw-r--r-- | pkg/cover/cover_test.go | 28 |
| -rw-r--r-- | pkg/cover/html.go | 8 |
| -rw-r--r-- | pkg/cover/report.go | 2 |
| -rw-r--r-- | pkg/flatrpc/flatrpc.fbs | 10 |
| -rw-r--r-- | pkg/flatrpc/flatrpc.go | 80 |
| -rw-r--r-- | pkg/flatrpc/flatrpc.h | 70 |
| -rw-r--r-- | pkg/fuzzer/cover.go | 2 |
| -rw-r--r-- | pkg/fuzzer/fuzzer_test.go | 8 |
| -rw-r--r-- | pkg/fuzzer/job.go | 4 |
| -rw-r--r-- | pkg/fuzzer/job_test.go | 22 |
| -rw-r--r-- | pkg/ipc/ipc.go | 15 |
| -rw-r--r-- | pkg/signal/signal.go | 20 |
| -rw-r--r-- | pkg/signal/signal_test.go | 16 |
| -rw-r--r-- | pkg/vminfo/vminfo_test.go | 4 |
23 files changed, 223 insertions, 240 deletions
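A large part of the diff below is regenerated flatbuffers code for the widened `[uint64]` signal and cover vectors (element size 4 -> 8 in the generated vector builders). A hedged sketch of building a `CallInfoRaw` with full 64-bit PCs via the native-object types that appear in the generated Go code; the import paths are assumed from the repository layout:

```go
package main

import (
	flatbuffers "github.com/google/flatbuffers/go"

	"github.com/google/syzkaller/pkg/flatrpc"
)

func main() {
	// With the widened schema, full 64-bit kernel PCs are carried as-is.
	call := &flatrpc.CallInfoRawT{
		Signal: []uint64{0xffffffd342e10000},
		Cover:  []uint64{0xffffffd2eeb2a000, 0xffffffd2f42c4000},
	}
	b := flatbuffers.NewBuilder(0)
	b.Finish(call.Pack(b))
	_ = b.FinishedBytes() // serialized CallInfoRaw table with uint64 vectors
}
```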
diff --git a/pkg/corpus/corpus.go b/pkg/corpus/corpus.go index 46e388366..5b2b9983f 100644 --- a/pkg/corpus/corpus.go +++ b/pkg/corpus/corpus.go @@ -53,7 +53,7 @@ func NewMonitoredCorpus(ctx context.Context, updates chan<- NewItemEvent) *Corpu // sysalls. In that case, there will be several ItemUpdate entities. type ItemUpdate struct { Call int - RawCover []uint32 + RawCover []uint64 } // Item objects are to be treated as immutable, otherwise it's just @@ -66,7 +66,7 @@ type Item struct { ProgData []byte // to save some Serialize() calls HasAny bool // whether the prog contains squashed arguments Signal signal.Signal - Cover []uint32 + Cover []uint64 Updates []ItemUpdate } @@ -78,15 +78,15 @@ type NewInput struct { Prog *prog.Prog Call int Signal signal.Signal - Cover []uint32 - RawCover []uint32 + Cover []uint64 + RawCover []uint64 } type NewItemEvent struct { Sig string Exists bool ProgData []byte - NewCover []uint32 + NewCover []uint64 } func (corpus *Corpus) Save(inp NewInput) { diff --git a/pkg/corpus/corpus_test.go b/pkg/corpus/corpus_test.go index 62aad1e04..90b11717a 100644 --- a/pkg/corpus/corpus_test.go +++ b/pkg/corpus/corpus_test.go @@ -62,16 +62,16 @@ func TestCorpusCoverage(t *testing.T) { rs := rand.NewSource(0) inp := generateInput(target, rs, 5, 5) - inp.Cover = []uint32{10, 11} + inp.Cover = []uint64{10, 11} go corpus.Save(inp) event := <-ch - assert.Equal(t, []uint32{10, 11}, event.NewCover) + assert.Equal(t, []uint64{10, 11}, event.NewCover) inp.Call = 1 - inp.Cover = []uint32{11, 12} + inp.Cover = []uint64{11, 12} go corpus.Save(inp) event = <-ch - assert.Equal(t, []uint32{12}, event.NewCover) + assert.Equal(t, []uint64{12}, event.NewCover) // Check the total corpus size. assert.Equal(t, corpus.StatCover.Val(), 3) @@ -101,9 +101,9 @@ func TestCorpusSaveConcurrency(t *testing.T) { func generateInput(target *prog.Target, rs rand.Source, ncalls, sizeSig int) NewInput { p := target.Generate(rs, ncalls, target.DefaultChoiceTable()) - var raw []uint32 + var raw []uint64 for i := 1; i <= sizeSig; i++ { - raw = append(raw, uint32(i)) + raw = append(raw, uint64(i)) } return NewInput{ Prog: p, diff --git a/pkg/cover/backend/backend.go b/pkg/cover/backend/backend.go index 78e90b6c0..5aa6a221c 100644 --- a/pkg/cover/backend/backend.go +++ b/pkg/cover/backend/backend.go @@ -14,7 +14,7 @@ type Impl struct { Symbols []*Symbol Frames []Frame Symbolize func(pcs map[*Module][]uint64) ([]Frame, error) - RestorePC func(pc uint32) uint64 + RestorePC func(pc uint64) uint64 CallbackPoints []uint64 PreciseCoverage bool } diff --git a/pkg/cover/backend/dwarf.go b/pkg/cover/backend/dwarf.go index 9cad576be..3f64cf752 100644 --- a/pkg/cover/backend/dwarf.go +++ b/pkg/cover/backend/dwarf.go @@ -152,7 +152,6 @@ func makeDWARFUnsafe(params *dwarfParams) (*Impl, error) { var allSymbols []*Symbol var allRanges []pcRange var allUnits []*CompileUnit - var pcBase uint64 preciseCoverage := true for _, module := range modules { errc := make(chan error, 1) @@ -169,9 +168,6 @@ func makeDWARFUnsafe(params *dwarfParams) (*Impl, error) { return } allSymbols = append(allSymbols, result.Symbols...) - if module.Name == "" { - pcBase = info.textAddr - } allCoverPoints[0] = append(allCoverPoints[0], result.CoverPoints[0]...) allCoverPoints[1] = append(allCoverPoints[1], result.CoverPoints[1]...) 
if module.Name == "" && len(result.CoverPoints[0]) == 0 { @@ -220,10 +216,6 @@ func makeDWARFUnsafe(params *dwarfParams) (*Impl, error) { if len(allSymbols) == 0 || len(allUnits) == 0 { return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y on linux)") } - if target.OS == targets.FreeBSD { - // On FreeBSD .text address in ELF is 0, but .text is actually mapped at 0xffffffff. - pcBase = ^uint64(0) - } var interner symbolizer.Interner impl := &Impl{ Units: allUnits, @@ -231,16 +223,16 @@ func makeDWARFUnsafe(params *dwarfParams) (*Impl, error) { Symbolize: func(pcs map[*Module][]uint64) ([]Frame, error) { return symbolize(target, &interner, objDir, srcDir, buildDir, splitBuildDelimiters, pcs) }, - RestorePC: makeRestorePC(params, pcBase), + RestorePC: makeRestorePC(params), CallbackPoints: allCoverPoints[0], PreciseCoverage: preciseCoverage, } return impl, nil } -func makeRestorePC(params *dwarfParams, pcBase uint64) func(pc uint32) uint64 { - return func(pcLow uint32) uint64 { - return PreviousInstructionPC(params.target, RestorePC(pcLow, uint32(pcBase>>32))) +func makeRestorePC(params *dwarfParams) func(pc uint64) uint64 { + return func(pc uint64) uint64 { + return PreviousInstructionPC(params.target, pc) } } diff --git a/pkg/cover/backend/gvisor.go b/pkg/cover/backend/gvisor.go index 2e662f4bd..65a3da9a2 100644 --- a/pkg/cover/backend/gvisor.go +++ b/pkg/cover/backend/gvisor.go @@ -48,8 +48,8 @@ func makeGvisor(target *targets.Target, objDir, srcDir, buildDir string, modules impl := &Impl{ Units: units, Frames: frames, - RestorePC: func(pc uint32) uint64 { - return uint64(pc) + RestorePC: func(pc uint64) uint64 { + return pc }, } return impl, nil diff --git a/pkg/cover/backend/pc.go b/pkg/cover/backend/pc.go index 78bb380a6..10462c243 100644 --- a/pkg/cover/backend/pc.go +++ b/pkg/cover/backend/pc.go @@ -9,10 +9,6 @@ import ( "github.com/google/syzkaller/sys/targets" ) -func RestorePC(pc, base uint32) uint64 { - return uint64(base)<<32 + uint64(pc) -} - func PreviousInstructionPC(target *targets.Target, pc uint64) uint64 { offset := instructionLen(target.Arch) pc -= offset diff --git a/pkg/cover/canonicalizer.go b/pkg/cover/canonicalizer.go index 3c21c0947..7241a7478 100644 --- a/pkg/cover/canonicalizer.go +++ b/pkg/cover/canonicalizer.go @@ -15,7 +15,7 @@ type Canonicalizer struct { modules map[string]KernelModule // Contains a sorted list of the canonical module addresses. - moduleKeys []uint32 + moduleKeys []uint64 } type CanonicalizerInstance struct { @@ -28,20 +28,20 @@ type CanonicalizerInstance struct { // Contains the current conversion maps used. type Convert struct { - conversionHash map[uint32]*canonicalizerModule - moduleKeys []uint32 + conversionHash map[uint64]*canonicalizerModule + moduleKeys []uint64 } type convertContext struct { errCount int - errPC uint32 + errPC uint64 convert *Convert } // Contains the offset and final address of each module. type canonicalizerModule struct { offset int - endAddr uint32 + endAddr uint64 // Discard coverage from current module. // Set to true if module is not present in canonical. discard bool @@ -59,7 +59,7 @@ func NewCanonicalizer(modules []KernelModule, flagSignal bool) *Canonicalizer { } // Store sorted canonical address keys. 
- canonicalModuleKeys := make([]uint32, len(modules)) + canonicalModuleKeys := make([]uint64, len(modules)) setModuleKeys(canonicalModuleKeys, modules) return &Canonicalizer{ modules: canonicalModules, @@ -72,35 +72,35 @@ func (can *Canonicalizer) NewInstance(modules []KernelModule) *CanonicalizerInst return &CanonicalizerInstance{} } // Save sorted list of module offsets. - moduleKeys := make([]uint32, len(modules)) + moduleKeys := make([]uint64, len(modules)) setModuleKeys(moduleKeys, modules) // Create a hash between the "canonical" module addresses and each VM instance. - instToCanonicalMap := make(map[uint32]*canonicalizerModule) - canonicalToInstMap := make(map[uint32]*canonicalizerModule) + instToCanonicalMap := make(map[uint64]*canonicalizerModule) + canonicalToInstMap := make(map[uint64]*canonicalizerModule) for _, module := range modules { discard := false - canonicalAddr := uint32(0) + canonicalAddr := uint64(0) canonicalModule, found := can.modules[module.Name] if !found || canonicalModule.Size != module.Size { log.Errorf("kernel build has changed; instance module %v differs from canonical", module.Name) discard = true } if found { - canonicalAddr = uint32(canonicalModule.Addr) + canonicalAddr = canonicalModule.Addr } - instAddr := uint32(module.Addr) + instAddr := module.Addr canonicalToInstMap[canonicalAddr] = &canonicalizerModule{ offset: int(instAddr) - int(canonicalAddr), - endAddr: uint32(module.Size) + canonicalAddr, + endAddr: module.Size + canonicalAddr, discard: discard, } instToCanonicalMap[instAddr] = &canonicalizerModule{ offset: int(canonicalAddr) - int(instAddr), - endAddr: uint32(module.Size) + instAddr, + endAddr: module.Size + instAddr, discard: discard, } } @@ -118,26 +118,26 @@ func (can *Canonicalizer) NewInstance(modules []KernelModule) *CanonicalizerInst } } -func (ci *CanonicalizerInstance) Canonicalize(elems []uint32) []uint32 { +func (ci *CanonicalizerInstance) Canonicalize(elems []uint64) []uint64 { if ci.canonical.moduleKeys == nil { return elems } return ci.canonicalize.convertPCs(elems) } -func (ci *CanonicalizerInstance) Decanonicalize(elems []uint32) []uint32 { +func (ci *CanonicalizerInstance) Decanonicalize(elems []uint64) []uint64 { if ci.canonical.moduleKeys == nil { return elems } return ci.decanonicalize.convertPCs(elems) } -func (ci *CanonicalizerInstance) DecanonicalizeFilter(bitmap map[uint32]uint32) map[uint32]uint32 { +func (ci *CanonicalizerInstance) DecanonicalizeFilter(bitmap map[uint64]uint32) map[uint64]uint32 { // Skip conversion if modules or filter are not used. if ci.canonical.moduleKeys == nil || len(bitmap) == 0 { return bitmap } - instBitmap := make(map[uint32]uint32) + instBitmap := make(map[uint64]uint32) convCtx := &convertContext{convert: ci.decanonicalize} for pc, val := range bitmap { if newPC, ok := ci.decanonicalize.convertPC(pc); ok { @@ -153,18 +153,16 @@ func (ci *CanonicalizerInstance) DecanonicalizeFilter(bitmap map[uint32]uint32) } // Store sorted list of addresses. Used to binary search when converting PCs. -func setModuleKeys(moduleKeys []uint32, modules []KernelModule) { +func setModuleKeys(moduleKeys []uint64, modules []KernelModule) { for idx, module := range modules { - // Truncate PCs to uint32, assuming that they fit into 32 bits. - // True for x86_64 and arm64 without KASLR. - moduleKeys[idx] = uint32(module.Addr) + moduleKeys[idx] = module.Addr } // Sort modules by address. 
sort.Slice(moduleKeys, func(i, j int) bool { return moduleKeys[i] < moduleKeys[j] }) } -func findModule(pc uint32, moduleKeys []uint32) (moduleIdx int) { +func findModule(pc uint64, moduleKeys []uint64) (moduleIdx int) { moduleIdx, _ = sort.Find(len(moduleKeys), func(moduleIdx int) int { if pc < moduleKeys[moduleIdx] { return -1 @@ -175,9 +173,9 @@ func findModule(pc uint32, moduleKeys []uint32) (moduleIdx int) { return moduleIdx - 1 } -func (convert *Convert) convertPCs(pcs []uint32) []uint32 { +func (convert *Convert) convertPCs(pcs []uint64) []uint64 { // Convert coverage. - var ret []uint32 + var ret []uint64 convCtx := &convertContext{convert: convert} for _, pc := range pcs { if newPC, ok := convert.convertPC(pc); ok { @@ -192,7 +190,7 @@ func (convert *Convert) convertPCs(pcs []uint32) []uint32 { return ret } -func (convert *Convert) convertPC(pc uint32) (uint32, bool) { +func (convert *Convert) convertPC(pc uint64) (uint64, bool) { moduleIdx := findModule(pc, convert.moduleKeys) // Check if address is above the first module offset. if moduleIdx >= 0 { @@ -205,7 +203,7 @@ func (convert *Convert) convertPC(pc uint32) (uint32, bool) { if module.discard { return pc, false } - pc = uint32(int(pc) + module.offset) + pc = uint64(int(pc) + module.offset) } } return pc, true @@ -219,7 +217,7 @@ func (cc *convertContext) discarded() string { return fmt.Sprintf("%v; not found in module map", errMsg) } -func (cc *convertContext) discard(pc uint32) { +func (cc *convertContext) discard(pc uint64) { cc.errCount += 1 if cc.errPC == 0 { cc.errPC = pc diff --git a/pkg/cover/canonicalizer_test.go b/pkg/cover/canonicalizer_test.go index b826008be..1619a08fa 100644 --- a/pkg/cover/canonicalizer_test.go +++ b/pkg/cover/canonicalizer_test.go @@ -20,12 +20,12 @@ type RPCServer struct { type Fuzzer struct { instModules *CanonicalizerInstance - cov []uint32 - goalCov []uint32 - bitmap map[uint32]uint32 - goalBitmap map[uint32]uint32 - sign []uint32 - goalSign []uint32 + cov []uint64 + goalCov []uint64 + bitmap map[uint64]uint32 + goalBitmap map[uint64]uint32 + sign []uint64 + goalSign []uint64 } type canonicalizeValue int @@ -43,31 +43,31 @@ func TestNilModules(t *testing.T) { serv.connect("f1", nil, true) serv.connect("f2", nil, true) - serv.fuzzers["f1"].cov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} - serv.fuzzers["f1"].goalCov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f1"].cov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f1"].goalCov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} - serv.fuzzers["f2"].cov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f2"].cov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} - serv.fuzzers["f1"].bitmap = map[uint32]uint32{ + serv.fuzzers["f1"].bitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f1"].goalBitmap = map[uint32]uint32{ + serv.fuzzers["f1"].goalBitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f2"].bitmap = map[uint32]uint32{ + serv.fuzzers["f2"].bitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f2"].goalBitmap = map[uint32]uint32{ + serv.fuzzers["f2"].goalBitmap = map[uint64]uint32{ 0x00010011: 1, 
0x00020FFF: 2, 0x00030000: 3, @@ -78,9 +78,9 @@ func TestNilModules(t *testing.T) { t.Fatalf("failed in canonicalization: %v", err) } - serv.fuzzers["f1"].goalCov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f1"].goalCov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} serv.fuzzers["f1"].goalSign = serv.fuzzers["f1"].goalCov - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} serv.fuzzers["f2"].goalSign = serv.fuzzers["f2"].goalCov if err := serv.runTest(Decanonicalize); err != "" { t.Fatalf("failed in decanonicalization: %v", err) @@ -103,7 +103,7 @@ func TestDisabledSignals(t *testing.T) { f2Modules := initModules(f2ModuleAddresses, f2ModuleSizes) serv.connect("f2", f2Modules, false) - pcs := []uint32{0x00010000, 0x00020000, 0x00030000, 0x00040000} + pcs := []uint64{0x00010000, 0x00020000, 0x00030000, 0x00040000} serv.fuzzers["f1"].cov = pcs serv.fuzzers["f1"].goalCov = pcs @@ -140,37 +140,37 @@ func TestModules(t *testing.T) { // f1 is the "canonical" fuzzer as it is first one instantiated. // This means that all coverage output should be the same as the inputs. - serv.fuzzers["f1"].cov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, + serv.fuzzers["f1"].cov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, 0x00035000, 0x00040000, 0x00045000, 0x00050000, 0x00055000} - serv.fuzzers["f1"].goalCov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, + serv.fuzzers["f1"].goalCov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, 0x00035000, 0x00040000, 0x00045000, 0x00050000, 0x00055000} // The modules addresss are inverted between: (2 and 4), (3 and 5), // affecting the output canonical coverage values in these ranges. 
- serv.fuzzers["f2"].cov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, + serv.fuzzers["f2"].cov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, 0x00035000, 0x00040000, 0x00045000, 0x00050000, 0x00055000} - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00015000, 0x00040000, 0x00025000, 0x00045000, + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00015000, 0x00040000, 0x00025000, 0x00045000, 0x0004a000, 0x00020000, 0x00030000, 0x0003b000, 0x00055000} - serv.fuzzers["f1"].bitmap = map[uint32]uint32{ + serv.fuzzers["f1"].bitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f1"].goalBitmap = map[uint32]uint32{ + serv.fuzzers["f1"].goalBitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f2"].bitmap = map[uint32]uint32{ + serv.fuzzers["f2"].bitmap = map[uint64]uint32{ 0x00010011: 1, 0x00020FFF: 2, 0x00030000: 3, 0x00040000: 4, } - serv.fuzzers["f2"].goalBitmap = map[uint32]uint32{ + serv.fuzzers["f2"].goalBitmap = map[uint64]uint32{ 0x00010011: 1, 0x00040FFF: 2, 0x00045000: 3, @@ -181,9 +181,9 @@ func TestModules(t *testing.T) { t.Fatalf("failed in canonicalization: %v", err) } - serv.fuzzers["f1"].goalCov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, + serv.fuzzers["f1"].goalCov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, 0x00035000, 0x00040000, 0x00045000, 0x00050000, 0x00055000} - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000, 0x00030000, 0x00035000, 0x00040000, 0x00045000, 0x00050000, 0x00055000} if err := serv.runTest(Decanonicalize); err != "" { t.Fatalf("failed in decanonicalization: %v", err) @@ -209,21 +209,21 @@ func TestChangingModules(t *testing.T) { // Module 2 is not present in the "canonical" fuzzer, so coverage values // in this range should be deleted. - serv.fuzzers["f2"].cov = []uint32{0x00010000, 0x00015000, 0x00020000, 0x00025000} - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00015000, 0x00025000} + serv.fuzzers["f2"].cov = []uint64{0x00010000, 0x00015000, 0x00020000, 0x00025000} + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00015000, 0x00025000} if err := serv.runTest(Canonicalize); err != "" { t.Fatalf("failed in canonicalization: %v", err) } - serv.fuzzers["f2"].goalCov = []uint32{0x00010000, 0x00015000, 0x00025000} + serv.fuzzers["f2"].goalCov = []uint64{0x00010000, 0x00015000, 0x00025000} if err := serv.runTest(Decanonicalize); err != "" { t.Fatalf("failed in decanonicalization: %v", err) } } func (serv *RPCServer) runTest(val canonicalizeValue) string { - var cov []uint32 + var cov []uint64 for name, fuzzer := range serv.fuzzers { if val == Canonicalize { cov = fuzzer.instModules.Canonicalize(fuzzer.cov) diff --git a/pkg/cover/cover.go b/pkg/cover/cover.go index 814ae4fb6..d863dcde6 100644 --- a/pkg/cover/cover.go +++ b/pkg/cover/cover.go @@ -4,9 +4,9 @@ // Package cover provides types for working with coverage information (arrays of covered PCs). package cover -type Cover map[uint32]struct{} +type Cover map[uint64]struct{} -func (cov *Cover) Merge(raw []uint32) { +func (cov *Cover) Merge(raw []uint64) { c := *cov if c == nil { c = make(Cover) @@ -18,7 +18,7 @@ func (cov *Cover) Merge(raw []uint32) { } // Merge merges raw into coverage and returns newly added PCs. Overwrites/mutates raw. 
-func (cov *Cover) MergeDiff(raw []uint32) []uint32 { +func (cov *Cover) MergeDiff(raw []uint64) []uint64 { c := *cov if c == nil { c = make(Cover) @@ -36,8 +36,8 @@ func (cov *Cover) MergeDiff(raw []uint32) []uint32 { return raw[:n] } -func (cov Cover) Serialize() []uint32 { - res := make([]uint32, 0, len(cov)) +func (cov Cover) Serialize() []uint64 { + res := make([]uint64, 0, len(cov)) for pc := range cov { res = append(res, pc) } diff --git a/pkg/cover/cover_test.go b/pkg/cover/cover_test.go index d5f214ff5..169dbf08c 100644 --- a/pkg/cover/cover_test.go +++ b/pkg/cover/cover_test.go @@ -14,35 +14,35 @@ import ( func TestMergeDiff(t *testing.T) { type Test struct { - init []uint32 - merge []uint32 - diff []uint32 - result []uint32 + init []uint64 + merge []uint64 + diff []uint64 + result []uint64 } tests := []Test{ { init: nil, merge: nil, diff: nil, - result: []uint32{}, + result: []uint64{}, }, { - init: []uint32{0, 1, 3, 4}, + init: []uint64{0, 1, 3, 4}, merge: nil, diff: nil, - result: []uint32{0, 1, 3, 4}, + result: []uint64{0, 1, 3, 4}, }, { init: nil, - merge: []uint32{0, 1, 3, 4}, - diff: []uint32{0, 1, 3, 4}, - result: []uint32{0, 1, 3, 4}, + merge: []uint64{0, 1, 3, 4}, + diff: []uint64{0, 1, 3, 4}, + result: []uint64{0, 1, 3, 4}, }, { - init: []uint32{0, 1, 3, 4}, - merge: []uint32{4, 7, 1, 9}, - diff: []uint32{7, 9}, - result: []uint32{0, 1, 3, 4, 7, 9}, + init: []uint64{0, 1, 3, 4}, + merge: []uint64{4, 7, 1, 9}, + diff: []uint64{7, 9}, + result: []uint64{0, 1, 3, 4, 7, 9}, }, } for i, test := range tests { diff --git a/pkg/cover/html.go b/pkg/cover/html.go index 5a78b75f8..935a19efc 100644 --- a/pkg/cover/html.go +++ b/pkg/cover/html.go @@ -25,7 +25,7 @@ import ( type CoverHandlerParams struct { Progs []Prog - CoverFilter map[uint32]uint32 + CoverFilter map[uint64]uint32 Debug bool Force bool } @@ -297,7 +297,7 @@ func (rg *ReportGenerator) DoFilterPCs(w io.Writer, params CoverHandlerParams) e continue } uniquePCs[pc] = true - if params.CoverFilter[uint32(pc)] != 0 { + if params.CoverFilter[pc] != 0 { pcs = append(pcs, pc) } } @@ -620,12 +620,12 @@ func (rg *ReportGenerator) DoCSV(w io.Writer, params CoverHandlerParams) error { return writer.WriteAll(data) } -func fixUpPCs(target string, progs []Prog, coverFilter map[uint32]uint32) []Prog { +func fixUpPCs(target string, progs []Prog, coverFilter map[uint64]uint32) []Prog { if coverFilter != nil { for i, prog := range progs { var nPCs []uint64 for _, pc := range prog.PCs { - if coverFilter[uint32(pc)] != 0 { + if coverFilter[pc] != 0 { nPCs = append(nPCs, pc) } } diff --git a/pkg/cover/report.go b/pkg/cover/report.go index b0113ca14..06c1e13f5 100644 --- a/pkg/cover/report.go +++ b/pkg/cover/report.go @@ -30,8 +30,6 @@ type Prog struct { type KernelModule = backend.KernelModule -var RestorePC = backend.RestorePC - func MakeReportGenerator(cfg *mgrconfig.Config, subsystem []mgrconfig.Subsystem, modules []KernelModule, rawCover bool) (*ReportGenerator, error) { impl, err := backend.Make(cfg.SysTarget, cfg.Type, cfg.KernelObj, diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs index c628d9b53..cdddef965 100644 --- a/pkg/flatrpc/flatrpc.fbs +++ b/pkg/flatrpc/flatrpc.fbs @@ -149,15 +149,15 @@ table ExecRequestRaw { prog_data :[uint8]; exec_opts :ExecOptsRaw; flags :RequestFlag; - signal_filter :[uint32]; + signal_filter :[uint64]; signal_filter_call :int32; // Repeat the program that many times (0 means 1). 
repeat :int32; } table SignalUpdateRaw { - new_max :[uint32]; - drop_max :[uint32]; + new_max :[uint64]; + drop_max :[uint64]; } // Notification from the executor that it started executing the program 'id'. @@ -184,10 +184,10 @@ table CallInfoRaw { // Call errno (0 if the call was successful). error :int32; // Feedback signal, filled if ExecFlag.CollectSignal is set. - signal :[uint32]; + signal :[uint64]; // Code coverage, filled if ExecFlag.CollectCover is set. // If ExecFlag.DedupCover is set, then duplicates are removed, otherwise it contains a trace. - cover :[uint32]; + cover :[uint64]; // Comparison operands. comps :[ComparisonRaw]; } diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go index 12e9a773d..2ef9b2595 100644 --- a/pkg/flatrpc/flatrpc.go +++ b/pkg/flatrpc/flatrpc.go @@ -1688,7 +1688,7 @@ type ExecRequestRawT struct { ProgData []byte `json:"prog_data"` ExecOpts *ExecOptsRawT `json:"exec_opts"` Flags RequestFlag `json:"flags"` - SignalFilter []uint32 `json:"signal_filter"` + SignalFilter []uint64 `json:"signal_filter"` SignalFilterCall int32 `json:"signal_filter_call"` Repeat int32 `json:"repeat"` } @@ -1706,7 +1706,7 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset signalFilterLength := len(t.SignalFilter) ExecRequestRawStartSignalFilterVector(builder, signalFilterLength) for j := signalFilterLength - 1; j >= 0; j-- { - builder.PrependUint32(t.SignalFilter[j]) + builder.PrependUint64(t.SignalFilter[j]) } signalFilterOffset = builder.EndVector(signalFilterLength) } @@ -1728,7 +1728,7 @@ func (rcv *ExecRequestRaw) UnPackTo(t *ExecRequestRawT) { t.ExecOpts = rcv.ExecOpts(nil).UnPack() t.Flags = rcv.Flags() signalFilterLength := rcv.SignalFilterLength() - t.SignalFilter = make([]uint32, signalFilterLength) + t.SignalFilter = make([]uint64, signalFilterLength) for j := 0; j < signalFilterLength; j++ { t.SignalFilter[j] = rcv.SignalFilter(j) } @@ -1843,11 +1843,11 @@ func (rcv *ExecRequestRaw) MutateFlags(n RequestFlag) bool { return rcv._tab.MutateUint64Slot(10, uint64(n)) } -func (rcv *ExecRequestRaw) SignalFilter(j int) uint32 { +func (rcv *ExecRequestRaw) SignalFilter(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetUint32(a + flatbuffers.UOffsetT(j*4)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -1860,11 +1860,11 @@ func (rcv *ExecRequestRaw) SignalFilterLength() int { return 0 } -func (rcv *ExecRequestRaw) MutateSignalFilter(j int, n uint32) bool { +func (rcv *ExecRequestRaw) MutateSignalFilter(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateUint32(a+flatbuffers.UOffsetT(j*4), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } @@ -1915,7 +1915,7 @@ func ExecRequestRawAddSignalFilter(builder *flatbuffers.Builder, signalFilter fl builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(signalFilter), 0) } func ExecRequestRawStartSignalFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) + return builder.StartVector(8, numElems, 8) } func ExecRequestRawAddSignalFilterCall(builder *flatbuffers.Builder, signalFilterCall int32) { builder.PrependInt32Slot(5, signalFilterCall, 0) @@ -1928,8 +1928,8 @@ func ExecRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { } type SignalUpdateRawT struct { - NewMax []uint32 `json:"new_max"` - DropMax 
[]uint32 `json:"drop_max"` + NewMax []uint64 `json:"new_max"` + DropMax []uint64 `json:"drop_max"` } func (t *SignalUpdateRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT { @@ -1941,7 +1941,7 @@ func (t *SignalUpdateRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse newMaxLength := len(t.NewMax) SignalUpdateRawStartNewMaxVector(builder, newMaxLength) for j := newMaxLength - 1; j >= 0; j-- { - builder.PrependUint32(t.NewMax[j]) + builder.PrependUint64(t.NewMax[j]) } newMaxOffset = builder.EndVector(newMaxLength) } @@ -1950,7 +1950,7 @@ func (t *SignalUpdateRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse dropMaxLength := len(t.DropMax) SignalUpdateRawStartDropMaxVector(builder, dropMaxLength) for j := dropMaxLength - 1; j >= 0; j-- { - builder.PrependUint32(t.DropMax[j]) + builder.PrependUint64(t.DropMax[j]) } dropMaxOffset = builder.EndVector(dropMaxLength) } @@ -1962,12 +1962,12 @@ func (t *SignalUpdateRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse func (rcv *SignalUpdateRaw) UnPackTo(t *SignalUpdateRawT) { newMaxLength := rcv.NewMaxLength() - t.NewMax = make([]uint32, newMaxLength) + t.NewMax = make([]uint64, newMaxLength) for j := 0; j < newMaxLength; j++ { t.NewMax[j] = rcv.NewMax(j) } dropMaxLength := rcv.DropMaxLength() - t.DropMax = make([]uint32, dropMaxLength) + t.DropMax = make([]uint64, dropMaxLength) for j := 0; j < dropMaxLength; j++ { t.DropMax[j] = rcv.DropMax(j) } @@ -2009,11 +2009,11 @@ func (rcv *SignalUpdateRaw) Table() flatbuffers.Table { return rcv._tab } -func (rcv *SignalUpdateRaw) NewMax(j int) uint32 { +func (rcv *SignalUpdateRaw) NewMax(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetUint32(a + flatbuffers.UOffsetT(j*4)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -2026,20 +2026,20 @@ func (rcv *SignalUpdateRaw) NewMaxLength() int { return 0 } -func (rcv *SignalUpdateRaw) MutateNewMax(j int, n uint32) bool { +func (rcv *SignalUpdateRaw) MutateNewMax(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateUint32(a+flatbuffers.UOffsetT(j*4), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } -func (rcv *SignalUpdateRaw) DropMax(j int) uint32 { +func (rcv *SignalUpdateRaw) DropMax(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetUint32(a + flatbuffers.UOffsetT(j*4)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -2052,11 +2052,11 @@ func (rcv *SignalUpdateRaw) DropMaxLength() int { return 0 } -func (rcv *SignalUpdateRaw) MutateDropMax(j int, n uint32) bool { +func (rcv *SignalUpdateRaw) MutateDropMax(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateUint32(a+flatbuffers.UOffsetT(j*4), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } @@ -2068,13 +2068,13 @@ func SignalUpdateRawAddNewMax(builder *flatbuffers.Builder, newMax flatbuffers.U builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(newMax), 0) } func SignalUpdateRawStartNewMaxVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) + return builder.StartVector(8, numElems, 8) } func SignalUpdateRawAddDropMax(builder *flatbuffers.Builder, dropMax flatbuffers.UOffsetT) { 
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(dropMax), 0) } func SignalUpdateRawStartDropMaxVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) + return builder.StartVector(8, numElems, 8) } func SignalUpdateRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() @@ -2212,8 +2212,8 @@ func ExecutingMessageRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { type CallInfoRawT struct { Flags CallFlag `json:"flags"` Error int32 `json:"error"` - Signal []uint32 `json:"signal"` - Cover []uint32 `json:"cover"` + Signal []uint64 `json:"signal"` + Cover []uint64 `json:"cover"` Comps []*ComparisonRawT `json:"comps"` } @@ -2226,7 +2226,7 @@ func (t *CallInfoRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT { signalLength := len(t.Signal) CallInfoRawStartSignalVector(builder, signalLength) for j := signalLength - 1; j >= 0; j-- { - builder.PrependUint32(t.Signal[j]) + builder.PrependUint64(t.Signal[j]) } signalOffset = builder.EndVector(signalLength) } @@ -2235,7 +2235,7 @@ func (t *CallInfoRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT { coverLength := len(t.Cover) CallInfoRawStartCoverVector(builder, coverLength) for j := coverLength - 1; j >= 0; j-- { - builder.PrependUint32(t.Cover[j]) + builder.PrependUint64(t.Cover[j]) } coverOffset = builder.EndVector(coverLength) } @@ -2261,12 +2261,12 @@ func (rcv *CallInfoRaw) UnPackTo(t *CallInfoRawT) { t.Flags = rcv.Flags() t.Error = rcv.Error() signalLength := rcv.SignalLength() - t.Signal = make([]uint32, signalLength) + t.Signal = make([]uint64, signalLength) for j := 0; j < signalLength; j++ { t.Signal[j] = rcv.Signal(j) } coverLength := rcv.CoverLength() - t.Cover = make([]uint32, coverLength) + t.Cover = make([]uint64, coverLength) for j := 0; j < coverLength; j++ { t.Cover[j] = rcv.Cover(j) } @@ -2339,11 +2339,11 @@ func (rcv *CallInfoRaw) MutateError(n int32) bool { return rcv._tab.MutateInt32Slot(6, n) } -func (rcv *CallInfoRaw) Signal(j int) uint32 { +func (rcv *CallInfoRaw) Signal(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetUint32(a + flatbuffers.UOffsetT(j*4)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -2356,20 +2356,20 @@ func (rcv *CallInfoRaw) SignalLength() int { return 0 } -func (rcv *CallInfoRaw) MutateSignal(j int, n uint32) bool { +func (rcv *CallInfoRaw) MutateSignal(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateUint32(a+flatbuffers.UOffsetT(j*4), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } -func (rcv *CallInfoRaw) Cover(j int) uint32 { +func (rcv *CallInfoRaw) Cover(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.GetUint32(a + flatbuffers.UOffsetT(j*4)) + return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } @@ -2382,11 +2382,11 @@ func (rcv *CallInfoRaw) CoverLength() int { return 0 } -func (rcv *CallInfoRaw) MutateCover(j int, n uint32) bool { +func (rcv *CallInfoRaw) MutateCover(j int, n uint64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { a := rcv._tab.Vector(o) - return rcv._tab.MutateUint32(a+flatbuffers.UOffsetT(j*4), n) + return rcv._tab.MutateUint64(a+flatbuffers.UOffsetT(j*8), n) } return false } @@ -2423,13 +2423,13 @@ func CallInfoRawAddSignal(builder 
*flatbuffers.Builder, signal flatbuffers.UOffs builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(signal), 0) } func CallInfoRawStartSignalVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) + return builder.StartVector(8, numElems, 8) } func CallInfoRawAddCover(builder *flatbuffers.Builder, cover flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(cover), 0) } func CallInfoRawStartCoverVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) + return builder.StartVector(8, numElems, 8) } func CallInfoRawAddComps(builder *flatbuffers.Builder, comps flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(comps), 0) diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h index 5721ea90f..649319e08 100644 --- a/pkg/flatrpc/flatrpc.h +++ b/pkg/flatrpc/flatrpc.h @@ -1459,7 +1459,7 @@ struct ExecRequestRawT : public flatbuffers::NativeTable { std::vector<uint8_t> prog_data{}; std::unique_ptr<rpc::ExecOptsRaw> exec_opts{}; rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0); - std::vector<uint32_t> signal_filter{}; + std::vector<uint64_t> signal_filter{}; int32_t signal_filter_call = 0; int32_t repeat = 0; ExecRequestRawT() = default; @@ -1492,8 +1492,8 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { rpc::RequestFlag flags() const { return static_cast<rpc::RequestFlag>(GetField<uint64_t>(VT_FLAGS, 0)); } - const flatbuffers::Vector<uint32_t> *signal_filter() const { - return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_SIGNAL_FILTER); + const flatbuffers::Vector<uint64_t> *signal_filter() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_SIGNAL_FILTER); } int32_t signal_filter_call() const { return GetField<int32_t>(VT_SIGNAL_FILTER_CALL, 0); @@ -1535,7 +1535,7 @@ struct ExecRequestRawBuilder { void add_flags(rpc::RequestFlag flags) { fbb_.AddElement<uint64_t>(ExecRequestRaw::VT_FLAGS, static_cast<uint64_t>(flags), 0); } - void add_signal_filter(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> signal_filter) { + void add_signal_filter(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter) { fbb_.AddOffset(ExecRequestRaw::VT_SIGNAL_FILTER, signal_filter); } void add_signal_filter_call(int32_t signal_filter_call) { @@ -1561,7 +1561,7 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw( flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data = 0, const rpc::ExecOptsRaw *exec_opts = nullptr, rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0), - flatbuffers::Offset<flatbuffers::Vector<uint32_t>> signal_filter = 0, + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal_filter = 0, int32_t signal_filter_call = 0, int32_t repeat = 0) { ExecRequestRawBuilder builder_(_fbb); @@ -1581,11 +1581,11 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect( const std::vector<uint8_t> *prog_data = nullptr, const rpc::ExecOptsRaw *exec_opts = nullptr, rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0), - const std::vector<uint32_t> *signal_filter = nullptr, + const std::vector<uint64_t> *signal_filter = nullptr, int32_t signal_filter_call = 0, int32_t repeat = 0) { auto prog_data__ = prog_data ? _fbb.CreateVector<uint8_t>(*prog_data) : 0; - auto signal_filter__ = signal_filter ? _fbb.CreateVector<uint32_t>(*signal_filter) : 0; + auto signal_filter__ = signal_filter ? 
_fbb.CreateVector<uint64_t>(*signal_filter) : 0; return rpc::CreateExecRequestRaw( _fbb, id, @@ -1601,8 +1601,8 @@ flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::FlatBuffer struct SignalUpdateRawT : public flatbuffers::NativeTable { typedef SignalUpdateRaw TableType; - std::vector<uint32_t> new_max{}; - std::vector<uint32_t> drop_max{}; + std::vector<uint64_t> new_max{}; + std::vector<uint64_t> drop_max{}; }; struct SignalUpdateRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { @@ -1612,11 +1612,11 @@ struct SignalUpdateRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { VT_NEW_MAX = 4, VT_DROP_MAX = 6 }; - const flatbuffers::Vector<uint32_t> *new_max() const { - return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_NEW_MAX); + const flatbuffers::Vector<uint64_t> *new_max() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_NEW_MAX); } - const flatbuffers::Vector<uint32_t> *drop_max() const { - return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DROP_MAX); + const flatbuffers::Vector<uint64_t> *drop_max() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_DROP_MAX); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && @@ -1635,10 +1635,10 @@ struct SignalUpdateRawBuilder { typedef SignalUpdateRaw Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_new_max(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> new_max) { + void add_new_max(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> new_max) { fbb_.AddOffset(SignalUpdateRaw::VT_NEW_MAX, new_max); } - void add_drop_max(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> drop_max) { + void add_drop_max(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> drop_max) { fbb_.AddOffset(SignalUpdateRaw::VT_DROP_MAX, drop_max); } explicit SignalUpdateRawBuilder(flatbuffers::FlatBufferBuilder &_fbb) @@ -1654,8 +1654,8 @@ struct SignalUpdateRawBuilder { inline flatbuffers::Offset<SignalUpdateRaw> CreateSignalUpdateRaw( flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset<flatbuffers::Vector<uint32_t>> new_max = 0, - flatbuffers::Offset<flatbuffers::Vector<uint32_t>> drop_max = 0) { + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> new_max = 0, + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> drop_max = 0) { SignalUpdateRawBuilder builder_(_fbb); builder_.add_drop_max(drop_max); builder_.add_new_max(new_max); @@ -1664,10 +1664,10 @@ inline flatbuffers::Offset<SignalUpdateRaw> CreateSignalUpdateRaw( inline flatbuffers::Offset<SignalUpdateRaw> CreateSignalUpdateRawDirect( flatbuffers::FlatBufferBuilder &_fbb, - const std::vector<uint32_t> *new_max = nullptr, - const std::vector<uint32_t> *drop_max = nullptr) { - auto new_max__ = new_max ? _fbb.CreateVector<uint32_t>(*new_max) : 0; - auto drop_max__ = drop_max ? _fbb.CreateVector<uint32_t>(*drop_max) : 0; + const std::vector<uint64_t> *new_max = nullptr, + const std::vector<uint64_t> *drop_max = nullptr) { + auto new_max__ = new_max ? _fbb.CreateVector<uint64_t>(*new_max) : 0; + auto drop_max__ = drop_max ? 
_fbb.CreateVector<uint64_t>(*drop_max) : 0; return rpc::CreateSignalUpdateRaw( _fbb, new_max__, @@ -1765,8 +1765,8 @@ struct CallInfoRawT : public flatbuffers::NativeTable { typedef CallInfoRaw TableType; rpc::CallFlag flags = static_cast<rpc::CallFlag>(0); int32_t error = 0; - std::vector<uint32_t> signal{}; - std::vector<uint32_t> cover{}; + std::vector<uint64_t> signal{}; + std::vector<uint64_t> cover{}; std::vector<rpc::ComparisonRaw> comps{}; }; @@ -1786,11 +1786,11 @@ struct CallInfoRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t error() const { return GetField<int32_t>(VT_ERROR, 0); } - const flatbuffers::Vector<uint32_t> *signal() const { - return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_SIGNAL); + const flatbuffers::Vector<uint64_t> *signal() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_SIGNAL); } - const flatbuffers::Vector<uint32_t> *cover() const { - return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_COVER); + const flatbuffers::Vector<uint64_t> *cover() const { + return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_COVER); } const flatbuffers::Vector<const rpc::ComparisonRaw *> *comps() const { return GetPointer<const flatbuffers::Vector<const rpc::ComparisonRaw *> *>(VT_COMPS); @@ -1822,10 +1822,10 @@ struct CallInfoRawBuilder { void add_error(int32_t error) { fbb_.AddElement<int32_t>(CallInfoRaw::VT_ERROR, error, 0); } - void add_signal(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> signal) { + void add_signal(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal) { fbb_.AddOffset(CallInfoRaw::VT_SIGNAL, signal); } - void add_cover(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> cover) { + void add_cover(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> cover) { fbb_.AddOffset(CallInfoRaw::VT_COVER, cover); } void add_comps(flatbuffers::Offset<flatbuffers::Vector<const rpc::ComparisonRaw *>> comps) { @@ -1846,8 +1846,8 @@ inline flatbuffers::Offset<CallInfoRaw> CreateCallInfoRaw( flatbuffers::FlatBufferBuilder &_fbb, rpc::CallFlag flags = static_cast<rpc::CallFlag>(0), int32_t error = 0, - flatbuffers::Offset<flatbuffers::Vector<uint32_t>> signal = 0, - flatbuffers::Offset<flatbuffers::Vector<uint32_t>> cover = 0, + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> signal = 0, + flatbuffers::Offset<flatbuffers::Vector<uint64_t>> cover = 0, flatbuffers::Offset<flatbuffers::Vector<const rpc::ComparisonRaw *>> comps = 0) { CallInfoRawBuilder builder_(_fbb); builder_.add_comps(comps); @@ -1862,11 +1862,11 @@ inline flatbuffers::Offset<CallInfoRaw> CreateCallInfoRawDirect( flatbuffers::FlatBufferBuilder &_fbb, rpc::CallFlag flags = static_cast<rpc::CallFlag>(0), int32_t error = 0, - const std::vector<uint32_t> *signal = nullptr, - const std::vector<uint32_t> *cover = nullptr, + const std::vector<uint64_t> *signal = nullptr, + const std::vector<uint64_t> *cover = nullptr, const std::vector<rpc::ComparisonRaw> *comps = nullptr) { - auto signal__ = signal ? _fbb.CreateVector<uint32_t>(*signal) : 0; - auto cover__ = cover ? _fbb.CreateVector<uint32_t>(*cover) : 0; + auto signal__ = signal ? _fbb.CreateVector<uint64_t>(*signal) : 0; + auto cover__ = cover ? _fbb.CreateVector<uint64_t>(*cover) : 0; auto comps__ = comps ? 
_fbb.CreateVectorOfStructs<rpc::ComparisonRaw>(*comps) : 0; return rpc::CreateCallInfoRaw( _fbb, diff --git a/pkg/fuzzer/cover.go b/pkg/fuzzer/cover.go index 03580128d..c34a3b219 100644 --- a/pkg/fuzzer/cover.go +++ b/pkg/fuzzer/cover.go @@ -34,7 +34,7 @@ func (cover *Cover) AddMaxSignal(sign signal.Signal) { cover.dropSignal.Subtract(sign) } -func (cover *Cover) addRawMaxSignal(signal []uint32, prio uint8) signal.Signal { +func (cover *Cover) addRawMaxSignal(signal []uint64, prio uint8) signal.Signal { cover.mu.Lock() defer cover.mu.Unlock() diff := cover.maxSignal.DiffRaw(signal, prio) diff --git a/pkg/fuzzer/fuzzer_test.go b/pkg/fuzzer/fuzzer_test.go index 9ac3376ac..206469fda 100644 --- a/pkg/fuzzer/fuzzer_test.go +++ b/pkg/fuzzer/fuzzer_test.go @@ -144,9 +144,9 @@ func TestRotate(t *testing.T) { }, rand.New(testutil.RandSource(t)), target) fakeSignal := func(size int) signal.Signal { - var pc []uint32 + var pc []uint64 for i := 0; i < size; i++ { - pc = append(pc, uint32(i)) + pc = append(pc, uint64(i)) } return signal.FromRaw(pc, 0) } @@ -188,8 +188,8 @@ func emulateExec(req *queue.Request) (*queue.Result, string, error) { serializedLines := bytes.Split(req.Prog.Serialize(), []byte("\n")) var info flatrpc.ProgInfo for i, call := range req.Prog.Calls { - cover := []uint32{uint32(call.Meta.ID*1024) + - crc32.Checksum(serializedLines[i], crc32q)%4} + cover := []uint64{uint64(call.Meta.ID*1024) + + uint64(crc32.Checksum(serializedLines[i], crc32q)%4)} callInfo := &flatrpc.CallInfo{} if req.ExecOpts.ExecFlags&flatrpc.ExecFlagCollectCover > 0 { callInfo.Cover = cover diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go index a5a22b2e8..dd3dc4a1a 100644 --- a/pkg/fuzzer/job.go +++ b/pkg/fuzzer/job.go @@ -136,7 +136,7 @@ type deflakedCover struct { stableSignal signal.Signal newStableSignal signal.Signal cover cover.Cover - rawCover []uint32 + rawCover []uint64 } func (job *triageJob) deflake(exec func(*queue.Request, ProgTypes) *queue.Result, stat *stats.Val, @@ -241,7 +241,7 @@ func reexecutionSuccess(info *flatrpc.ProgInfo, oldInfo *flatrpc.CallInfo, call return info.Extra != nil && len(info.Extra.Signal) != 0 } -func getSignalAndCover(p *prog.Prog, info *flatrpc.ProgInfo, call int) (signal.Signal, []uint32) { +func getSignalAndCover(p *prog.Prog, info *flatrpc.ProgInfo, call int) (signal.Signal, []uint64) { inf := info.Extra if call != -1 { inf = info.Calls[call] diff --git a/pkg/fuzzer/job_test.go b/pkg/fuzzer/job_test.go index 70d083be5..cbb99daec 100644 --- a/pkg/fuzzer/job_test.go +++ b/pkg/fuzzer/job_test.go @@ -25,14 +25,14 @@ func TestDeflakeFail(t *testing.T) { testJob := &triageJob{ p: prog, info: &flatrpc.CallInfo{}, - newSignal: signal.FromRaw([]uint32{0, 1, 2, 3, 4}, 0), + newSignal: signal.FromRaw([]uint64{0, 1, 2, 3, 4}, 0), } run := 0 ret, stop := testJob.deflake(func(_ *queue.Request, _ ProgTypes) *queue.Result { run++ // For first, we return 0 and 1. For second, 1 and 2. And so on. 
- return fakeResult(0, []uint32{uint32(run), uint32(run + 1)}, []uint32{10, 20}) + return fakeResult(0, []uint64{uint64(run), uint64(run + 1)}, []uint64{10, 20}) }, nil, false) assert.False(t, stop) assert.Equal(t, 5, run) @@ -51,21 +51,21 @@ func TestDeflakeSuccess(t *testing.T) { testJob := &triageJob{ p: prog, info: &flatrpc.CallInfo{}, - newSignal: signal.FromRaw([]uint32{0, 1, 2}, 0), + newSignal: signal.FromRaw([]uint64{0, 1, 2}, 0), } run := 0 ret, stop := testJob.deflake(func(_ *queue.Request, _ ProgTypes) *queue.Result { run++ switch run { case 1: - return fakeResult(0, []uint32{0, 2, 4, 6, 8}, []uint32{10, 20}) + return fakeResult(0, []uint64{0, 2, 4, 6, 8}, []uint64{10, 20}) case 2: // This one should be ignored -- it has a different errno. - return fakeResult(1, []uint32{0, 1, 2}, []uint32{100}) + return fakeResult(1, []uint64{0, 1, 2}, []uint64{100}) case 3: - return fakeResult(0, []uint32{0, 2, 4, 6, 8}, []uint32{20, 30}) + return fakeResult(0, []uint64{0, 2, 4, 6, 8}, []uint64{20, 30}) case 4: - return fakeResult(0, []uint32{0, 2, 6}, []uint32{30, 40}) + return fakeResult(0, []uint64{0, 2, 6}, []uint64{30, 40}) } // We expect it to have finished earlier. t.Fatal("only 4 runs were expected") @@ -73,14 +73,14 @@ func TestDeflakeSuccess(t *testing.T) { }, nil, false) assert.False(t, stop) // Cover is a union of all coverages. - assert.ElementsMatch(t, []uint32{10, 20, 30, 40}, ret.cover.Serialize()) + assert.ElementsMatch(t, []uint64{10, 20, 30, 40}, ret.cover.Serialize()) // 0, 2, 6 were in three resuls. - assert.ElementsMatch(t, []uint32{0, 2, 6}, ret.stableSignal.ToRaw()) + assert.ElementsMatch(t, []uint64{0, 2, 6}, ret.stableSignal.ToRaw()) // 0, 2 were also in newSignal. - assert.ElementsMatch(t, []uint32{0, 2}, ret.newStableSignal.ToRaw()) + assert.ElementsMatch(t, []uint64{0, 2}, ret.newStableSignal.ToRaw()) } -func fakeResult(errno int32, signal, cover []uint32) *queue.Result { +func fakeResult(errno int32, signal, cover []uint64) *queue.Result { return &queue.Result{ Info: &flatrpc.ProgInfo{ Calls: []*flatrpc.CallInfo{ diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go index 1b356c38c..84a7b9541 100644 --- a/pkg/ipc/ipc.go +++ b/pkg/ipc/ipc.go @@ -359,11 +359,11 @@ func (env *Env) parseOutput(opts *flatrpc.ExecOpts, ncalls int) (*flatrpc.ProgIn extraParts = append(extraParts, flatrpc.CallInfo{}) inf = &extraParts[len(extraParts)-1] } - if inf.Signal, ok = readUint32Array(&out, reply.signalSize); !ok { + if inf.Signal, ok = readUint64Array(&out, reply.signalSize); !ok { return nil, fmt.Errorf("call %v/%v/%v: signal overflow: %v/%v", i, reply.index, reply.num, reply.signalSize, len(out)) } - if inf.Cover, ok = readUint32Array(&out, reply.coverSize); !ok { + if inf.Cover, ok = readUint64Array(&out, reply.coverSize); !ok { return nil, fmt.Errorf("call %v/%v/%v: cover overflow: %v/%v", i, reply.index, reply.num, reply.coverSize, len(out)) } @@ -397,10 +397,10 @@ func convertExtra(extraParts []flatrpc.CallInfo, dedupCover bool) *flatrpc.CallI for _, part := range extraParts { extraSignal.Merge(signal.FromRaw(part.Signal, 0)) } - extra.Signal = make([]uint32, len(extraSignal)) + extra.Signal = make([]uint64, len(extraSignal)) i := 0 for s := range extraSignal { - extra.Signal[i] = uint32(s) + extra.Signal[i] = uint64(s) i++ } return &extra @@ -466,17 +466,16 @@ func readUint64(outp *[]byte) (uint64, bool) { return v, true } -func readUint32Array(outp *[]byte, size uint32) ([]uint32, bool) { +func readUint64Array(outp *[]byte, size uint32) ([]uint64, bool) { if size == 0 { return 
nil, true } out := *outp - dataSize := int(size * 4) + dataSize := int(size * 8) if dataSize > len(out) { return nil, false } - // "Convert" the data to uint32. - res := unsafe.Slice((*uint32)(unsafe.Pointer(&out[0])), size) + res := unsafe.Slice((*uint64)(unsafe.Pointer(&out[0])), size) *outp = out[dataSize:] // Detach the resulting array from the original data. return slices.Clone(res), true diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go index 10a1ef0cb..5f6da48cb 100644 --- a/pkg/signal/signal.go +++ b/pkg/signal/signal.go @@ -7,7 +7,7 @@ package signal import "math/rand" type ( - elemType uint32 + elemType uint64 prioType int8 ) @@ -50,7 +50,7 @@ func (s *Signal) Split(n int) Signal { return c } -func FromRaw(raw []uint32, prio uint8) Signal { +func FromRaw(raw []uint64, prio uint8) Signal { if len(raw) == 0 { return nil } @@ -78,7 +78,7 @@ func (s Signal) Diff(s1 Signal) Signal { return res } -func (s Signal) DiffRaw(raw []uint32, prio uint8) Signal { +func (s Signal) DiffRaw(raw []uint64, prio uint8) Signal { var res Signal for _, e := range raw { if p, ok := s[elemType(e)]; ok && p >= prioType(prio) { @@ -161,8 +161,8 @@ func (s Signal) RandomSubset(r *rand.Rand, size int) Signal { // FilterRaw returns a subset of original raw elements that either are not present in ignore, // or coincides with the one in alwaysTake. -func FilterRaw(raw []uint32, ignore, alwaysTake Signal) []uint32 { - var ret []uint32 +func FilterRaw(raw []uint64, ignore, alwaysTake Signal) []uint64 { + var ret []uint64 for _, e := range raw { if _, ok := alwaysTake[elemType(e)]; ok { ret = append(ret, e) @@ -174,8 +174,8 @@ func FilterRaw(raw []uint32, ignore, alwaysTake Signal) []uint32 { } // DiffFromRaw returns a subset of the raw elements that is not present in Signal. 
-func (s Signal) DiffFromRaw(raw []uint32) []uint32 { - var ret []uint32 +func (s Signal) DiffFromRaw(raw []uint64) []uint64 { + var ret []uint64 for _, e := range raw { if _, ok := s[elemType(e)]; !ok { ret = append(ret, e) @@ -184,10 +184,10 @@ func (s Signal) DiffFromRaw(raw []uint32) []uint32 { return ret } -func (s Signal) ToRaw() []uint32 { - var raw []uint32 +func (s Signal) ToRaw() []uint64 { + var raw []uint64 for e := range s { - raw = append(raw, uint32(e)) + raw = append(raw, uint64(e)) } return raw } diff --git a/pkg/signal/signal_test.go b/pkg/signal/signal_test.go index efd53d2ea..52b91fc19 100644 --- a/pkg/signal/signal_test.go +++ b/pkg/signal/signal_test.go @@ -13,7 +13,7 @@ import ( func TestRandomSubset(t *testing.T) { r := rand.New(testutil.RandSource(t)) - base := FromRaw([]uint32{0, 1, 2, 3, 4}, 0) + base := FromRaw([]uint64{0, 1, 2, 3, 4}, 0) var s Signal for i := 0; i < 1000 && s.Len() < base.Len(); i++ { delta := base.RandomSubset(r, 1) @@ -24,18 +24,18 @@ func TestRandomSubset(t *testing.T) { } func TestSubtract(t *testing.T) { - base := FromRaw([]uint32{0, 1, 2, 3, 4}, 0) + base := FromRaw([]uint64{0, 1, 2, 3, 4}, 0) assert.Equal(t, 5, base.Len()) - base.Subtract(FromRaw([]uint32{0}, 0)) + base.Subtract(FromRaw([]uint64{0}, 0)) assert.Equal(t, 4, base.Len()) - base.Subtract(FromRaw([]uint32{1}, 0)) + base.Subtract(FromRaw([]uint64{1}, 0)) assert.Equal(t, 3, base.Len()) } func TestIntersectsWith(t *testing.T) { - base := FromRaw([]uint32{0, 1, 2, 3, 4}, 1) - assert.True(t, base.IntersectsWith(FromRaw([]uint32{0, 5, 10}, 1))) - assert.False(t, base.IntersectsWith(FromRaw([]uint32{5, 10, 15}, 1))) + base := FromRaw([]uint64{0, 1, 2, 3, 4}, 1) + assert.True(t, base.IntersectsWith(FromRaw([]uint64{0, 5, 10}, 1))) + assert.False(t, base.IntersectsWith(FromRaw([]uint64{5, 10, 15}, 1))) // The other signal has a lower priority. - assert.False(t, base.IntersectsWith(FromRaw([]uint32{0, 1, 2}, 0))) + assert.False(t, base.IntersectsWith(FromRaw([]uint64{0, 1, 2}, 0))) } diff --git a/pkg/vminfo/vminfo_test.go b/pkg/vminfo/vminfo_test.go index 0e5d17ef2..f58e3f7e5 100644 --- a/pkg/vminfo/vminfo_test.go +++ b/pkg/vminfo/vminfo_test.go @@ -106,8 +106,8 @@ func createSuccessfulResults(source queue.Source, stop chan struct{}) { info := &flatrpc.ProgInfo{} for range req.Prog.Calls { info.Calls = append(info.Calls, &flatrpc.CallInfo{ - Cover: []uint32{1}, - Signal: []uint32{1}, + Cover: []uint64{1}, + Signal: []uint64{1}, Comps: []*flatrpc.Comparison{{Op1: 1, Op2: 2}}, }) } |
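The canonicalizer changes above keep the same structure but now operate on full 64-bit PCs: module base addresses are kept sorted, a PC is mapped to its module by binary search, and that module's offset is applied. A simplified, self-contained sketch of the conversion with hypothetical module addresses (not the exact syzkaller code):

```go
package main

import (
	"fmt"
	"sort"
)

type module struct {
	offset  int    // target base minus source base for this module
	endAddr uint64 // base + size in the source address space
}

// convertPC translates one PC between address spaces, mirroring the
// findModule/convertPC logic from pkg/cover/canonicalizer.go.
func convertPC(pc uint64, keys []uint64, mods map[uint64]*module) uint64 {
	// Find the last module base that is <= pc.
	idx, _ := sort.Find(len(keys), func(i int) int {
		if pc < keys[i] {
			return -1
		}
		return +1
	})
	idx--
	if idx < 0 {
		return pc // below the first module: a core-kernel PC, left unchanged
	}
	m := mods[keys[idx]]
	if pc >= m.endAddr {
		return pc // past the end of that module: not module code
	}
	return uint64(int(pc) + m.offset)
}

func main() {
	// Hypothetical layout: one module at 0xffffffd2eeb2a000 in this VM
	// instance and at 0xffffffd2f0000000 in the canonical build.
	instBase := uint64(0xffffffd2eeb2a000)
	canonBase := uint64(0xffffffd2f0000000)
	keys := []uint64{instBase}
	mods := map[uint64]*module{
		instBase: {offset: int(canonBase) - int(instBase), endAddr: instBase + 0x10000},
	}
	fmt.Printf("%#x\n", convertPC(instBase+0x123, keys, mods)) // 0xffffffd2f0000123
}
```

Core-kernel PCs fall below the first module base and pass through unchanged, which is why no per-kernel base constant is needed once PCs stay 64-bit end to end.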
