diff options
| author | Albert van der Linde <alinde@google.com> | 2020-07-14 07:47:26 +0000 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2020-07-14 12:20:37 +0200 |
| commit | 6f4580264a29fa73097e96b436141a8594b97610 (patch) | |
| tree | 3e184241f624d90f04cffc9d546eee4e03099216 | |
| parent | 230553f68fcaa90508b724edd0dfc806669c1f22 (diff) | |
prog/alloc: align address allocation for aligned[addr]
Calls to alloc didn't respect the alignment attribute. Now
Type.Alignment() is used to ensure each type is correctly
aligned. Existing descriptions with [align[X]] don't have an
issue as they align to small blocks and the default alignment is
64 bytes. This commit adds support for [align[X]] for an X
larger than 64.
| -rw-r--r-- | prog/alloc.go | 11 | ||||
| -rw-r--r-- | prog/alloc_test.go | 59 | ||||
| -rw-r--r-- | prog/encoding.go | 2 | ||||
| -rw-r--r-- | prog/rand.go | 2 | ||||
| -rw-r--r-- | prog/target.go | 8 | ||||
| -rw-r--r-- | tools/syz-trace2syz/proggen/proggen.go | 2 |
6 files changed, 55 insertions, 29 deletions
diff --git a/prog/alloc.go b/prog/alloc.go index 344ec7a0e..a4b7b7f1f 100644 --- a/prog/alloc.go +++ b/prog/alloc.go @@ -51,13 +51,18 @@ func (ma *memAlloc) noteAlloc(addr0, size0 uint64) { } } -func (ma *memAlloc) alloc(r *randGen, size0 uint64) uint64 { +// alloc returns the next free address of size0 with respect to the given alignment. +func (ma *memAlloc) alloc(r *randGen, size0, alignment0 uint64) uint64 { if size0 == 0 { size0 = 1 } + if alignment0 == 0 { + alignment0 = 1 + } size := (size0 + memAllocGranule - 1) / memAllocGranule + alignment := (alignment0 + memAllocGranule - 1) / memAllocGranule end := ma.size - size - for start := uint64(0); start <= end; start++ { + for start := uint64(0); start <= end; start += alignment { empty := true for i := uint64(0); i < size; i++ { if ma.get(start + i) { @@ -72,7 +77,7 @@ func (ma *memAlloc) alloc(r *randGen, size0 uint64) uint64 { } } ma.bankruptcy() - return ma.alloc(r, size0) + return ma.alloc(r, size0, alignment0) } func (ma *memAlloc) bankruptcy() { diff --git a/prog/alloc_test.go b/prog/alloc_test.go index c83063747..757dbd8ef 100644 --- a/prog/alloc_test.go +++ b/prog/alloc_test.go @@ -11,29 +11,52 @@ import ( func TestMemAlloc(t *testing.T) { t.Parallel() type op struct { - addr uint64 - size int // if positive do noteAlloc, otherwise -- alloc + addr uint64 + size int // if positive do noteAlloc, otherwise -- alloc + alignment uint64 } tests := [][]op{ { // Just sequential allocation. - {0, -1}, - {64, -64}, - {128, -65}, - {256, -16}, - {320, -8}, + {0, -1, 1}, + {64, -64, 1}, + {128, -65, 1}, + {256, -16, 1}, + {320, -8, 1}, }, { // First reserve some memory and then allocate. - {0, 1}, - {64, 63}, - {128, 64}, - {192, 65}, - {320, -1}, - {448, 1}, - {384, -1}, - {576, 1}, - {640, -128}, + {0, 1, 1}, + {64, 63, 1}, + {128, 64, 1}, + {192, 65, 1}, + {320, -1, 1}, + {448, 1, 1}, + {384, -1, 1}, + {576, 1, 1}, + {640, -128, 1}, + }, + { + // Aligned memory allocation. 
+ {0, -1, 1}, + {512, -1, 512}, + {1024, -1, 512}, + {128, -1, 128}, + {64, -1, 1}, + // 128 used, jumps on. + {192, -1, 1}, + {256, -1, 1}, + {320, -1, 1}, + {384, -1, 1}, + {448, -1, 1}, + // 512 used, jumps on. + {576, -1, 1}, + // Next 512 available at 1536. + {1536, -1, 512}, + // Next smallest available. + {640, -1, 1}, + // Next 64 byte aligned block. + {1600, -512, 1}, }, } for ti, test := range tests { @@ -47,9 +70,9 @@ func TestMemAlloc(t *testing.T) { continue } t.Logf("#%v: alloc(%v) = %v", i, -op.size, op.addr) - addr := ma.alloc(nil, uint64(-op.size)) + addr := ma.alloc(nil, uint64(-op.size), op.alignment) if addr != op.addr { - t.Fatalf("bad result %v", addr) + t.Fatalf("bad result %v, expecting %v", addr, op.addr) } } }) diff --git a/prog/encoding.go b/prog/encoding.go index 1f5b2b374..6dd2a2582 100644 --- a/prog/encoding.go +++ b/prog/encoding.go @@ -1017,7 +1017,7 @@ func (p *parser) fixupAutos(prog *Prog) { _ = s case *PtrType: a := arg.(*PointerArg) - a.Address = s.ma.alloc(nil, a.Res.Size()) + a.Address = s.ma.alloc(nil, a.Res.Size(), a.Res.Type().Alignment()) default: panic(fmt.Sprintf("unsupported auto type %T", typ)) } diff --git a/prog/rand.go b/prog/rand.go index 5ff448062..4dfff97e5 100644 --- a/prog/rand.go +++ b/prog/rand.go @@ -336,7 +336,7 @@ func (r *randGen) randString(s *state, t *BufferType) []byte { } func (r *randGen) allocAddr(s *state, typ Type, dir Dir, size uint64, data Arg) *PointerArg { - return MakePointerArg(typ, dir, s.ma.alloc(r, size), data) + return MakePointerArg(typ, dir, s.ma.alloc(r, size, data.Type().Alignment()), data) } func (r *randGen) allocVMA(s *state, typ Type, dir Dir, numPages uint64) *PointerArg { diff --git a/prog/target.go b/prog/target.go index 3dbad8a67..b090613be 100644 --- a/prog/target.go +++ b/prog/target.go @@ -305,14 +305,12 @@ func (pg *Builder) Append(c *Call) error { return nil } -func (pg *Builder) Allocate(size uint64) uint64 { - return pg.ma.alloc(nil, size) +func (pg *Builder) 
Allocate(size, alignment uint64) uint64 { + return pg.ma.alloc(nil, size, alignment) } func (pg *Builder) AllocateVMA(npages uint64) uint64 { - psize := pg.target.PageSize - addr := pg.ma.alloc(nil, (npages+1)*psize) - return (addr + psize - 1) & ^(psize - 1) + return pg.ma.alloc(nil, npages*pg.target.PageSize, pg.target.PageSize) } func (pg *Builder) Finalize() (*Prog, error) { diff --git a/tools/syz-trace2syz/proggen/proggen.go b/tools/syz-trace2syz/proggen/proggen.go index 7fff07a5f..63bd9d9ff 100644 --- a/tools/syz-trace2syz/proggen/proggen.go +++ b/tools/syz-trace2syz/proggen/proggen.go @@ -470,7 +470,7 @@ func (ctx *context) parseProc(syzType *prog.ProcType, dir prog.Dir, traceType pa } func (ctx *context) addr(syzType prog.Type, dir prog.Dir, size uint64, data prog.Arg) prog.Arg { - return prog.MakePointerArg(syzType, dir, ctx.builder.Allocate(size), data) + return prog.MakePointerArg(syzType, dir, ctx.builder.Allocate(size, data.Type().Alignment()), data) } func shouldSkip(c *parser.Syscall) bool { |
