reg: support for register casting

Adds methods for referencing sub- or super-registers. For example, for
general purpose registers you can now reference As8(), As16(), ... and
for vector registers AsX(), AsY(), AsZ().

Closes #1
This commit is contained in:
Michael McLoughlin
2018-12-30 18:40:45 -08:00
parent 4644d996ee
commit 18cdf50d7c
16 changed files with 558 additions and 244 deletions

View File

@@ -42,12 +42,13 @@ func Generate() {
os.Exit(Main(cfg, ctx))
}
func GP8v() reg.Virtual { return ctx.GP8v() }
func GP16v() reg.Virtual { return ctx.GP16v() }
func GP32v() reg.Virtual { return ctx.GP32v() }
func GP64v() reg.Virtual { return ctx.GP64v() }
func Xv() reg.Virtual { return ctx.Xv() }
func Yv() reg.Virtual { return ctx.Yv() }
func GP8v() reg.GPVirtual { return ctx.GP8v() }
func GP16v() reg.GPVirtual { return ctx.GP16v() }
func GP32v() reg.GPVirtual { return ctx.GP32v() }
func GP64v() reg.GPVirtual { return ctx.GP64v() }
func Xv() reg.VecVirtual { return ctx.Xv() }
func Yv() reg.VecVirtual { return ctx.Yv() }
func Zv() reg.VecVirtual { return ctx.Zv() }
func Param(name string) gotypes.Component { return ctx.Param(name) }
func ParamIndex(i int) gotypes.Component { return ctx.ParamIndex(i) }

View File

@@ -106,12 +106,12 @@ func IsR64(op Op) bool {
// IsPseudo returns true if op is a pseudo register.
func IsPseudo(op Op) bool {
return IsRegisterKind(op, reg.Internal)
return IsRegisterKind(op, reg.KindPseudo)
}
// IsGP returns true if op is a general-purpose register of size n bytes.
func IsGP(op Op, n uint) bool {
return IsRegisterKindSize(op, reg.GP, n)
return IsRegisterKindSize(op, reg.KindGP, n)
}
// IsXmm0 returns true if op is the X0 register.
@@ -121,12 +121,12 @@ func IsXmm0(op Op) bool {
// IsXmm returns true if op is a 128-bit XMM register.
func IsXmm(op Op) bool {
return IsRegisterKindSize(op, reg.SSEAVX, 16)
return IsRegisterKindSize(op, reg.KindVector, 16)
}
// IsYmm returns true if op is a 256-bit YMM register.
func IsYmm(op Op) bool {
return IsRegisterKindSize(op, reg.SSEAVX, 32)
return IsRegisterKindSize(op, reg.KindVector, 32)
}
// IsRegisterKindSize returns true if op is a register of the given kind and size in bytes.
@@ -183,7 +183,7 @@ func IsMSize(op Op, n uint) bool {
// IsMReg returns true if op is a register that can be used in a memory operand.
func IsMReg(op Op) bool {
return IsPseudo(op) || IsRegisterKind(op, reg.GP)
return IsPseudo(op) || IsRegisterKind(op, reg.KindGP)
}
// IsM128 returns true if op is a 128-bit memory operand.

View File

@@ -69,7 +69,7 @@ func TestChecks(t *testing.T) {
{IsR64, reg.R10, true},
{IsR64, reg.EBX, false},
// SIMD registers
// Vector registers
{IsXmm0, reg.X0, true},
{IsXmm0, reg.X13, false},
{IsXmm0, reg.Y3, false},

View File

@@ -18,6 +18,7 @@ type Allocator struct {
allocation reg.Allocation
edges []*edge
possible map[reg.Virtual][]reg.Physical
vidtopid map[reg.VID]reg.PID
}
func NewAllocator(rs []reg.Physical) (*Allocator, error) {
@@ -28,6 +29,7 @@ func NewAllocator(rs []reg.Physical) (*Allocator, error) {
registers: rs,
allocation: reg.NewEmptyAllocation(),
possible: map[reg.Virtual][]reg.Physical{},
vidtopid: map[reg.VID]reg.PID{},
}, nil
}
@@ -60,7 +62,7 @@ func (a *Allocator) Add(r reg.Register) {
if _, found := a.possible[v]; found {
return
}
a.possible[v] = a.possibleregisters(v.Bytes())
a.possible[v] = a.possibleregisters(v)
}
func (a *Allocator) Allocate() (reg.Allocation, error) {
@@ -83,6 +85,16 @@ func (a *Allocator) Allocate() (reg.Allocation, error) {
// update possible allocations based on edges.
func (a *Allocator) update() error {
for v := range a.possible {
pid, found := a.vidtopid[v.VirtualID()]
if !found {
continue
}
a.possible[v] = filterregisters(a.possible[v], func(r reg.Physical) bool {
return r.PhysicalID() == pid
})
}
var rem []*edge
for _, e := range a.edges {
e.X, e.Y = a.allocation.LookupDefault(e.X), a.allocation.LookupDefault(e.Y)
@@ -107,6 +119,7 @@ func (a *Allocator) update() error {
}
}
a.edges = rem
return nil
}
@@ -125,13 +138,12 @@ func (a *Allocator) mostrestricted() reg.Virtual {
// discardconflicting removes registers from vs possible list that conflict with p.
func (a *Allocator) discardconflicting(v reg.Virtual, p reg.Physical) {
var rs []reg.Physical
for _, r := range a.possible[v] {
if !reg.AreConflicting(r, p) {
rs = append(rs, r)
a.possible[v] = filterregisters(a.possible[v], func(r reg.Physical) bool {
if pid, found := a.vidtopid[v.VirtualID()]; found && pid == p.PhysicalID() {
return true
}
}
a.possible[v] = rs
return !reg.AreConflicting(r, p)
})
}
// alloc attempts to allocate a register to v.
@@ -140,8 +152,10 @@ func (a *Allocator) alloc(v reg.Virtual) error {
if len(ps) == 0 {
return errors.New("failed to allocate registers")
}
a.allocation[v] = ps[0]
p := ps[0]
a.allocation[v] = p
delete(a.possible, v)
a.vidtopid[v.VirtualID()] = p.PhysicalID()
return nil
}
@@ -150,11 +164,17 @@ func (a *Allocator) remaining() int {
return len(a.possible)
}
// possibleregisters returns all allocate-able registers of the given size.
func (a *Allocator) possibleregisters(n uint) []reg.Physical {
// possibleregisters returns all allocate-able registers for the given virtual.
func (a *Allocator) possibleregisters(v reg.Virtual) []reg.Physical {
return filterregisters(a.registers, func(r reg.Physical) bool {
return v.SatisfiedBy(r) && (r.Info()&reg.Restricted) == 0
})
}
func filterregisters(in []reg.Physical, predicate func(reg.Physical) bool) []reg.Physical {
var rs []reg.Physical
for _, r := range a.registers {
if r.Bytes() == n && (r.Info()&reg.Restricted) == 0 {
for _, r := range in {
if predicate(r) {
rs = append(rs, r)
}
}

View File

@@ -10,7 +10,7 @@ func TestAllocatorSimple(t *testing.T) {
c := reg.NewCollection()
x, y := c.Xv(), c.Yv()
a, err := NewAllocatorForKind(reg.SSEAVX)
a, err := NewAllocatorForKind(reg.KindVector)
if err != nil {
t.Fatal(err)
}
@@ -32,7 +32,7 @@ func TestAllocatorSimple(t *testing.T) {
}
func TestAllocatorImpossible(t *testing.T) {
a, err := NewAllocatorForKind(reg.SSEAVX)
a, err := NewAllocatorForKind(reg.KindVector)
if err != nil {
t.Fatal(err)
}

View File

@@ -16,18 +16,20 @@ func (c *Collection) VirtualRegister(k Kind, s Size) Virtual {
return NewVirtual(vid, k, s)
}
func (c *Collection) GP8v() Virtual { return c.GPv(B8) }
func (c *Collection) GP8v() GPVirtual { return c.GPv(B8) }
func (c *Collection) GP16v() Virtual { return c.GPv(B16) }
func (c *Collection) GP16v() GPVirtual { return c.GPv(B16) }
func (c *Collection) GP32v() Virtual { return c.GPv(B32) }
func (c *Collection) GP32v() GPVirtual { return c.GPv(B32) }
func (c *Collection) GP64v() Virtual { return c.GPv(B64) }
func (c *Collection) GP64v() GPVirtual { return c.GPv(B64) }
func (c *Collection) GPv(s Size) Virtual { return c.VirtualRegister(GP, s) }
func (c *Collection) GPv(s Size) GPVirtual { return newgpv(c.VirtualRegister(KindGP, s)) }
func (c *Collection) Xv() Virtual { return c.VirtualRegister(SSEAVX, B128) }
func (c *Collection) Xv() VecVirtual { return c.Vecv(B128) }
func (c *Collection) Yv() Virtual { return c.VirtualRegister(SSEAVX, B256) }
func (c *Collection) Yv() VecVirtual { return c.Vecv(B256) }
func (c *Collection) Zv() Virtual { return c.VirtualRegister(SSEAVX, B512) }
func (c *Collection) Zv() VecVirtual { return c.Vecv(B512) }
func (c *Collection) Vecv(s Size) VecVirtual { return newvecv(c.VirtualRegister(KindVector, s)) }

View File

@@ -64,3 +64,74 @@ func TestAreConflicting(t *testing.T) {
}
}
}
func TestFamilyLookup(t *testing.T) {
cases := []struct {
Family *Family
ID PID
Spec Spec
Expect Physical
}{
{GeneralPurpose, 0, S8, AL},
{GeneralPurpose, 1, S8L, CL},
{GeneralPurpose, 2, S8H, DH},
{GeneralPurpose, 3, S16, BX},
{GeneralPurpose, 9, S32, R9L},
{GeneralPurpose, 13, S64, R13},
{GeneralPurpose, 13, S512, nil},
{GeneralPurpose, 133, S64, nil},
{Vector, 1, S128, X1},
{Vector, 13, S256, Y13},
{Vector, 27, S512, Z27},
{Vector, 1, S16, nil},
{Vector, 299, S256, nil},
}
for _, c := range cases {
got := c.Family.Lookup(c.ID, c.Spec)
if got != c.Expect {
t.Errorf("pid=%v spec=%v: lookup got %v expect %v", c.ID, c.Spec, got, c.Expect)
}
}
}
func TestPhysicalAs(t *testing.T) {
cases := []struct {
Register Physical
Spec Spec
Expect Physical
}{
{DX, S8L, DL},
{DX, S8H, DH},
{DX, S8, DL},
{DX, S16, DX},
{DX, S32, EDX},
{DX, S64, RDX},
{DX, S256, nil},
}
for _, c := range cases {
got := c.Register.as(c.Spec)
if got != c.Expect {
t.Errorf("%s.as(%v) = %v; expect %v", c.Register.Asm(), c.Spec, got, c.Expect)
}
}
}
func TestVirtualAs(t *testing.T) {
cases := []struct {
Virtual Register
Physical Physical
Match bool
}{
{GeneralPurpose.Virtual(0, B8), CL, true},
{GeneralPurpose.Virtual(0, B8), CH, true},
{GeneralPurpose.Virtual(0, B32).as(S8L), CL, true},
{GeneralPurpose.Virtual(0, B32).as(S8L), CH, false},
{GeneralPurpose.Virtual(0, B16).as(S32), R9L, true},
{GeneralPurpose.Virtual(0, B16).as(S32), R9, false},
}
for _, c := range cases {
if c.Virtual.(Virtual).SatisfiedBy(c.Physical) != c.Match {
t.Errorf("%s.SatisfiedBy(%v) != %v", c.Virtual.Asm(), c.Physical, c.Match)
}
}
}

View File

@@ -4,10 +4,10 @@ import "testing"
func TestSetRegisterIdentity(t *testing.T) {
rs := []Register{
NewVirtual(42, GP, B32),
NewVirtual(43, GP, B32),
NewVirtual(42, SSEAVX, B32),
NewVirtual(42, GP, B64),
NewVirtual(42, KindGP, B32),
NewVirtual(43, KindGP, B32),
NewVirtual(42, KindVector, B32),
NewVirtual(42, KindGP, B64),
AL, AH, CL,
AX, R13W,
EDX, R9L,
@@ -27,7 +27,7 @@ func TestSetRegisterIdentity(t *testing.T) {
}
func TestSetFamilyRegisters(t *testing.T) {
fs := []*Family{GeneralPurpose, SIMD}
fs := []*Family{GeneralPurpose, Vector}
s := NewEmptySet()
expect := 0
for _, f := range fs {

View File

@@ -26,24 +26,17 @@ type Family struct {
registers []Physical
}
func (f *Family) add(s Spec, id PID, name string, info Info) Physical {
r := register{
id: id,
kind: f.Kind,
name: name,
info: info,
Spec: s,
}
f.registers = append(f.registers, r)
func (f *Family) define(s Spec, id PID, name string, flags ...Info) Physical {
r := newregister(f, s, id, name, flags...)
f.add(r)
return r
}
func (f *Family) define(s Spec, id PID, name string) Physical {
return f.add(s, id, name, None)
}
func (f *Family) restricted(s Spec, id PID, name string) Physical {
return f.add(s, id, name, Restricted)
func (f *Family) add(r Physical) {
if r.Kind() != f.Kind {
panic("bad kind")
}
f.registers = append(f.registers, r)
}
func (f *Family) Virtual(id VID, s Size) Virtual {
@@ -64,6 +57,16 @@ func (f *Family) Set() Set {
return s
}
// Lookup returns the register with given physical ID and spec. Returns nil if no such register exists.
func (f *Family) Lookup(id PID, s Spec) Physical {
for _, r := range f.registers {
if r.PhysicalID() == id && r.Mask() == s.Mask() {
return r
}
}
return nil
}
type (
VID uint16
PID uint16
@@ -73,11 +76,13 @@ type Register interface {
Kind() Kind
Bytes() uint
Asm() string
as(Spec) Register
register()
}
type Virtual interface {
VirtualID() VID
SatisfiedBy(Physical) bool
Register
}
@@ -93,6 +98,7 @@ type virtual struct {
id VID
kind Kind
Size
mask uint16
}
func NewVirtual(id VID, k Kind, s Size) Virtual {
@@ -111,6 +117,19 @@ func (v virtual) Asm() string {
return fmt.Sprintf("<virtual:%v:%v:%v>", v.id, v.Kind(), v.Bytes())
}
func (v virtual) SatisfiedBy(p Physical) bool {
return v.Kind() == p.Kind() && v.Bytes() == p.Bytes() && (v.mask == 0 || v.mask == p.Mask())
}
func (v virtual) as(s Spec) Register {
return virtual{
id: v.id,
kind: v.kind,
Size: Size(s.Bytes()),
mask: s.Mask(),
}
}
func (v virtual) register() {}
type Info uint8
@@ -136,17 +155,36 @@ func ToPhysical(r Register) Physical {
}
type register struct {
family *Family
id PID
kind Kind
name string
info Info
Spec
}
func newregister(f *Family, s Spec, id PID, name string, flags ...Info) register {
r := register{
family: f,
id: id,
name: name,
info: None,
Spec: s,
}
for _, flag := range flags {
r.info |= flag
}
return r
}
func (r register) PhysicalID() PID { return r.id }
func (r register) Kind() Kind { return r.kind }
func (r register) Kind() Kind { return r.family.Kind }
func (r register) Asm() string { return r.name }
func (r register) Info() Info { return r.info }
func (r register) as(s Spec) Register {
return r.family.Lookup(r.PhysicalID(), s)
}
func (r register) register() {}
type Spec uint16

View File

@@ -1,19 +1,24 @@
package reg
// Register families.
// Register kinds.
const (
Internal Kind = iota
GP
MMX
SSEAVX
Mask
KindPseudo Kind = iota
KindGP
KindVector
)
var Families = []*Family{
// Declare register families.
var (
Pseudo = &Family{Kind: KindPseudo}
GeneralPurpose = &Family{Kind: KindGP}
Vector = &Family{Kind: KindVector}
Families = []*Family{
Pseudo,
GeneralPurpose,
SIMD,
}
Vector,
}
)
var familiesByKind = map[Kind]*Family{}
@@ -29,202 +34,291 @@ func FamilyOfKind(k Kind) *Family {
// Pseudo registers.
var (
Pseudo = &Family{Kind: Internal}
FramePointer = Pseudo.define(S0, 0, "FP")
ProgramCounter = Pseudo.define(S0, 0, "PC")
StaticBase = Pseudo.define(S0, 0, "SB")
StackPointer = Pseudo.define(S0, 0, "SP")
)
// GP is the interface for a general purpose register.
type GP interface {
As8() Register
As8L() Register
As8H() Register
As16() Register
As32() Register
As64() Register
}
type gpcasts struct {
Register
}
func (c gpcasts) As8() Register { return c.as(S8) }
func (c gpcasts) As8L() Register { return c.as(S8L) }
func (c gpcasts) As8H() Register { return c.as(S8H) }
func (c gpcasts) As16() Register { return c.as(S16) }
func (c gpcasts) As32() Register { return c.as(S32) }
func (c gpcasts) As64() Register { return c.as(S64) }
type GPPhysical interface {
Physical
GP
}
type gpp struct {
Physical
GP
}
func newgpp(r Physical) GPPhysical { return gpp{Physical: r, GP: gpcasts{r}} }
type GPVirtual interface {
Virtual
GP
}
type gpv struct {
Virtual
GP
}
func newgpv(v Virtual) GPVirtual { return gpv{Virtual: v, GP: gpcasts{v}} }
func gp(s Spec, id PID, name string, flags ...Info) GPPhysical {
r := newgpp(newregister(GeneralPurpose, s, id, name, flags...))
GeneralPurpose.add(r)
return r
}
// General purpose registers.
var (
GeneralPurpose = &Family{Kind: GP}
// Low byte
AL = GeneralPurpose.define(S8L, 0, "AL")
CL = GeneralPurpose.define(S8L, 1, "CL")
DL = GeneralPurpose.define(S8L, 2, "DL")
BL = GeneralPurpose.define(S8L, 3, "BL")
AL = gp(S8L, 0, "AL")
CL = gp(S8L, 1, "CL")
DL = gp(S8L, 2, "DL")
BL = gp(S8L, 3, "BL")
// High byte
AH = GeneralPurpose.define(S8H, 0, "AH")
CH = GeneralPurpose.define(S8H, 1, "CH")
DH = GeneralPurpose.define(S8H, 2, "DH")
BH = GeneralPurpose.define(S8H, 3, "BH")
AH = gp(S8H, 0, "AH")
CH = gp(S8H, 1, "CH")
DH = gp(S8H, 2, "DH")
BH = gp(S8H, 3, "BH")
// 8-bit
SPB = GeneralPurpose.restricted(S8, 4, "SP")
BPB = GeneralPurpose.define(S8, 5, "BP")
SIB = GeneralPurpose.define(S8, 6, "SI")
DIB = GeneralPurpose.define(S8, 7, "DI")
R8B = GeneralPurpose.define(S8, 8, "R8")
R9B = GeneralPurpose.define(S8, 9, "R9")
R10B = GeneralPurpose.define(S8, 10, "R10")
R11B = GeneralPurpose.define(S8, 11, "R11")
R12B = GeneralPurpose.define(S8, 12, "R12")
R13B = GeneralPurpose.define(S8, 13, "R13")
R14B = GeneralPurpose.define(S8, 14, "R14")
R15B = GeneralPurpose.define(S8, 15, "R15")
SPB = gp(S8, 4, "SP", Restricted)
BPB = gp(S8, 5, "BP")
SIB = gp(S8, 6, "SI")
DIB = gp(S8, 7, "DI")
R8B = gp(S8, 8, "R8")
R9B = gp(S8, 9, "R9")
R10B = gp(S8, 10, "R10")
R11B = gp(S8, 11, "R11")
R12B = gp(S8, 12, "R12")
R13B = gp(S8, 13, "R13")
R14B = gp(S8, 14, "R14")
R15B = gp(S8, 15, "R15")
// 16-bit
AX = GeneralPurpose.define(S16, 0, "AX")
CX = GeneralPurpose.define(S16, 1, "CX")
DX = GeneralPurpose.define(S16, 2, "DX")
BX = GeneralPurpose.define(S16, 3, "BX")
SP = GeneralPurpose.restricted(S16, 4, "SP")
BP = GeneralPurpose.define(S16, 5, "BP")
SI = GeneralPurpose.define(S16, 6, "SI")
DI = GeneralPurpose.define(S16, 7, "DI")
R8W = GeneralPurpose.define(S16, 8, "R8")
R9W = GeneralPurpose.define(S16, 9, "R9")
R10W = GeneralPurpose.define(S16, 10, "R10")
R11W = GeneralPurpose.define(S16, 11, "R11")
R12W = GeneralPurpose.define(S16, 12, "R12")
R13W = GeneralPurpose.define(S16, 13, "R13")
R14W = GeneralPurpose.define(S16, 14, "R14")
R15W = GeneralPurpose.define(S16, 15, "R15")
AX = gp(S16, 0, "AX")
CX = gp(S16, 1, "CX")
DX = gp(S16, 2, "DX")
BX = gp(S16, 3, "BX")
SP = gp(S16, 4, "SP", Restricted)
BP = gp(S16, 5, "BP")
SI = gp(S16, 6, "SI")
DI = gp(S16, 7, "DI")
R8W = gp(S16, 8, "R8")
R9W = gp(S16, 9, "R9")
R10W = gp(S16, 10, "R10")
R11W = gp(S16, 11, "R11")
R12W = gp(S16, 12, "R12")
R13W = gp(S16, 13, "R13")
R14W = gp(S16, 14, "R14")
R15W = gp(S16, 15, "R15")
// 32-bit
EAX = GeneralPurpose.define(S32, 0, "AX")
ECX = GeneralPurpose.define(S32, 1, "CX")
EDX = GeneralPurpose.define(S32, 2, "DX")
EBX = GeneralPurpose.define(S32, 3, "BX")
ESP = GeneralPurpose.restricted(S32, 4, "SP")
EBP = GeneralPurpose.define(S32, 5, "BP")
ESI = GeneralPurpose.define(S32, 6, "SI")
EDI = GeneralPurpose.define(S32, 7, "DI")
R8L = GeneralPurpose.define(S32, 8, "R8")
R9L = GeneralPurpose.define(S32, 9, "R9")
R10L = GeneralPurpose.define(S32, 10, "R10")
R11L = GeneralPurpose.define(S32, 11, "R11")
R12L = GeneralPurpose.define(S32, 12, "R12")
R13L = GeneralPurpose.define(S32, 13, "R13")
R14L = GeneralPurpose.define(S32, 14, "R14")
R15L = GeneralPurpose.define(S32, 15, "R15")
EAX = gp(S32, 0, "AX")
ECX = gp(S32, 1, "CX")
EDX = gp(S32, 2, "DX")
EBX = gp(S32, 3, "BX")
ESP = gp(S32, 4, "SP", Restricted)
EBP = gp(S32, 5, "BP")
ESI = gp(S32, 6, "SI")
EDI = gp(S32, 7, "DI")
R8L = gp(S32, 8, "R8")
R9L = gp(S32, 9, "R9")
R10L = gp(S32, 10, "R10")
R11L = gp(S32, 11, "R11")
R12L = gp(S32, 12, "R12")
R13L = gp(S32, 13, "R13")
R14L = gp(S32, 14, "R14")
R15L = gp(S32, 15, "R15")
// 64-bit
RAX = GeneralPurpose.define(S64, 0, "AX")
RCX = GeneralPurpose.define(S64, 1, "CX")
RDX = GeneralPurpose.define(S64, 2, "DX")
RBX = GeneralPurpose.define(S64, 3, "BX")
RSP = GeneralPurpose.restricted(S64, 4, "SP")
RBP = GeneralPurpose.define(S64, 5, "BP")
RSI = GeneralPurpose.define(S64, 6, "SI")
RDI = GeneralPurpose.define(S64, 7, "DI")
R8 = GeneralPurpose.define(S64, 8, "R8")
R9 = GeneralPurpose.define(S64, 9, "R9")
R10 = GeneralPurpose.define(S64, 10, "R10")
R11 = GeneralPurpose.define(S64, 11, "R11")
R12 = GeneralPurpose.define(S64, 12, "R12")
R13 = GeneralPurpose.define(S64, 13, "R13")
R14 = GeneralPurpose.define(S64, 14, "R14")
R15 = GeneralPurpose.define(S64, 15, "R15")
RAX = gp(S64, 0, "AX")
RCX = gp(S64, 1, "CX")
RDX = gp(S64, 2, "DX")
RBX = gp(S64, 3, "BX")
RSP = gp(S64, 4, "SP", Restricted)
RBP = gp(S64, 5, "BP")
RSI = gp(S64, 6, "SI")
RDI = gp(S64, 7, "DI")
R8 = gp(S64, 8, "R8")
R9 = gp(S64, 9, "R9")
R10 = gp(S64, 10, "R10")
R11 = gp(S64, 11, "R11")
R12 = gp(S64, 12, "R12")
R13 = gp(S64, 13, "R13")
R14 = gp(S64, 14, "R14")
R15 = gp(S64, 15, "R15")
)
// SIMD registers.
var (
SIMD = &Family{Kind: SSEAVX}
type Vec interface {
AsX() Register
AsY() Register
AsZ() Register
}
type veccasts struct {
Register
}
func (c veccasts) AsX() Register { return c.as(S128) }
func (c veccasts) AsY() Register { return c.as(S256) }
func (c veccasts) AsZ() Register { return c.as(S512) }
type VecPhysical interface {
Physical
Vec
}
type vecp struct {
Physical
Vec
}
func newvecp(r Physical) VecPhysical { return vecp{Physical: r, Vec: veccasts{r}} }
type VecVirtual interface {
Virtual
Vec
}
type vecv struct {
Virtual
Vec
}
func newvecv(v Virtual) VecVirtual { return vecv{Virtual: v, Vec: veccasts{v}} }
func vec(s Spec, id PID, name string, flags ...Info) VecPhysical {
r := newvecp(newregister(Vector, s, id, name, flags...))
Vector.add(r)
return r
}
// Vector registers.
var (
// 128-bit
X0 = SIMD.define(S128, 0, "X0")
X1 = SIMD.define(S128, 1, "X1")
X2 = SIMD.define(S128, 2, "X2")
X3 = SIMD.define(S128, 3, "X3")
X4 = SIMD.define(S128, 4, "X4")
X5 = SIMD.define(S128, 5, "X5")
X6 = SIMD.define(S128, 6, "X6")
X7 = SIMD.define(S128, 7, "X7")
X8 = SIMD.define(S128, 8, "X8")
X9 = SIMD.define(S128, 9, "X9")
X10 = SIMD.define(S128, 10, "X10")
X11 = SIMD.define(S128, 11, "X11")
X12 = SIMD.define(S128, 12, "X12")
X13 = SIMD.define(S128, 13, "X13")
X14 = SIMD.define(S128, 14, "X14")
X15 = SIMD.define(S128, 15, "X15")
X16 = SIMD.define(S128, 16, "X16")
X17 = SIMD.define(S128, 17, "X17")
X18 = SIMD.define(S128, 18, "X18")
X19 = SIMD.define(S128, 19, "X19")
X20 = SIMD.define(S128, 20, "X20")
X21 = SIMD.define(S128, 21, "X21")
X22 = SIMD.define(S128, 22, "X22")
X23 = SIMD.define(S128, 23, "X23")
X24 = SIMD.define(S128, 24, "X24")
X25 = SIMD.define(S128, 25, "X25")
X26 = SIMD.define(S128, 26, "X26")
X27 = SIMD.define(S128, 27, "X27")
X28 = SIMD.define(S128, 28, "X28")
X29 = SIMD.define(S128, 29, "X29")
X30 = SIMD.define(S128, 30, "X30")
X31 = SIMD.define(S128, 31, "X31")
X0 = vec(S128, 0, "X0")
X1 = vec(S128, 1, "X1")
X2 = vec(S128, 2, "X2")
X3 = vec(S128, 3, "X3")
X4 = vec(S128, 4, "X4")
X5 = vec(S128, 5, "X5")
X6 = vec(S128, 6, "X6")
X7 = vec(S128, 7, "X7")
X8 = vec(S128, 8, "X8")
X9 = vec(S128, 9, "X9")
X10 = vec(S128, 10, "X10")
X11 = vec(S128, 11, "X11")
X12 = vec(S128, 12, "X12")
X13 = vec(S128, 13, "X13")
X14 = vec(S128, 14, "X14")
X15 = vec(S128, 15, "X15")
X16 = vec(S128, 16, "X16")
X17 = vec(S128, 17, "X17")
X18 = vec(S128, 18, "X18")
X19 = vec(S128, 19, "X19")
X20 = vec(S128, 20, "X20")
X21 = vec(S128, 21, "X21")
X22 = vec(S128, 22, "X22")
X23 = vec(S128, 23, "X23")
X24 = vec(S128, 24, "X24")
X25 = vec(S128, 25, "X25")
X26 = vec(S128, 26, "X26")
X27 = vec(S128, 27, "X27")
X28 = vec(S128, 28, "X28")
X29 = vec(S128, 29, "X29")
X30 = vec(S128, 30, "X30")
X31 = vec(S128, 31, "X31")
// 256-bit
Y0 = SIMD.define(S256, 0, "Y0")
Y1 = SIMD.define(S256, 1, "Y1")
Y2 = SIMD.define(S256, 2, "Y2")
Y3 = SIMD.define(S256, 3, "Y3")
Y4 = SIMD.define(S256, 4, "Y4")
Y5 = SIMD.define(S256, 5, "Y5")
Y6 = SIMD.define(S256, 6, "Y6")
Y7 = SIMD.define(S256, 7, "Y7")
Y8 = SIMD.define(S256, 8, "Y8")
Y9 = SIMD.define(S256, 9, "Y9")
Y10 = SIMD.define(S256, 10, "Y10")
Y11 = SIMD.define(S256, 11, "Y11")
Y12 = SIMD.define(S256, 12, "Y12")
Y13 = SIMD.define(S256, 13, "Y13")
Y14 = SIMD.define(S256, 14, "Y14")
Y15 = SIMD.define(S256, 15, "Y15")
Y16 = SIMD.define(S256, 16, "Y16")
Y17 = SIMD.define(S256, 17, "Y17")
Y18 = SIMD.define(S256, 18, "Y18")
Y19 = SIMD.define(S256, 19, "Y19")
Y20 = SIMD.define(S256, 20, "Y20")
Y21 = SIMD.define(S256, 21, "Y21")
Y22 = SIMD.define(S256, 22, "Y22")
Y23 = SIMD.define(S256, 23, "Y23")
Y24 = SIMD.define(S256, 24, "Y24")
Y25 = SIMD.define(S256, 25, "Y25")
Y26 = SIMD.define(S256, 26, "Y26")
Y27 = SIMD.define(S256, 27, "Y27")
Y28 = SIMD.define(S256, 28, "Y28")
Y29 = SIMD.define(S256, 29, "Y29")
Y30 = SIMD.define(S256, 30, "Y30")
Y31 = SIMD.define(S256, 31, "Y31")
Y0 = vec(S256, 0, "Y0")
Y1 = vec(S256, 1, "Y1")
Y2 = vec(S256, 2, "Y2")
Y3 = vec(S256, 3, "Y3")
Y4 = vec(S256, 4, "Y4")
Y5 = vec(S256, 5, "Y5")
Y6 = vec(S256, 6, "Y6")
Y7 = vec(S256, 7, "Y7")
Y8 = vec(S256, 8, "Y8")
Y9 = vec(S256, 9, "Y9")
Y10 = vec(S256, 10, "Y10")
Y11 = vec(S256, 11, "Y11")
Y12 = vec(S256, 12, "Y12")
Y13 = vec(S256, 13, "Y13")
Y14 = vec(S256, 14, "Y14")
Y15 = vec(S256, 15, "Y15")
Y16 = vec(S256, 16, "Y16")
Y17 = vec(S256, 17, "Y17")
Y18 = vec(S256, 18, "Y18")
Y19 = vec(S256, 19, "Y19")
Y20 = vec(S256, 20, "Y20")
Y21 = vec(S256, 21, "Y21")
Y22 = vec(S256, 22, "Y22")
Y23 = vec(S256, 23, "Y23")
Y24 = vec(S256, 24, "Y24")
Y25 = vec(S256, 25, "Y25")
Y26 = vec(S256, 26, "Y26")
Y27 = vec(S256, 27, "Y27")
Y28 = vec(S256, 28, "Y28")
Y29 = vec(S256, 29, "Y29")
Y30 = vec(S256, 30, "Y30")
Y31 = vec(S256, 31, "Y31")
// 512-bit
Z0 = SIMD.define(S512, 0, "Z0")
Z1 = SIMD.define(S512, 1, "Z1")
Z2 = SIMD.define(S512, 2, "Z2")
Z3 = SIMD.define(S512, 3, "Z3")
Z4 = SIMD.define(S512, 4, "Z4")
Z5 = SIMD.define(S512, 5, "Z5")
Z6 = SIMD.define(S512, 6, "Z6")
Z7 = SIMD.define(S512, 7, "Z7")
Z8 = SIMD.define(S512, 8, "Z8")
Z9 = SIMD.define(S512, 9, "Z9")
Z10 = SIMD.define(S512, 10, "Z10")
Z11 = SIMD.define(S512, 11, "Z11")
Z12 = SIMD.define(S512, 12, "Z12")
Z13 = SIMD.define(S512, 13, "Z13")
Z14 = SIMD.define(S512, 14, "Z14")
Z15 = SIMD.define(S512, 15, "Z15")
Z16 = SIMD.define(S512, 16, "Z16")
Z17 = SIMD.define(S512, 17, "Z17")
Z18 = SIMD.define(S512, 18, "Z18")
Z19 = SIMD.define(S512, 19, "Z19")
Z20 = SIMD.define(S512, 20, "Z20")
Z21 = SIMD.define(S512, 21, "Z21")
Z22 = SIMD.define(S512, 22, "Z22")
Z23 = SIMD.define(S512, 23, "Z23")
Z24 = SIMD.define(S512, 24, "Z24")
Z25 = SIMD.define(S512, 25, "Z25")
Z26 = SIMD.define(S512, 26, "Z26")
Z27 = SIMD.define(S512, 27, "Z27")
Z28 = SIMD.define(S512, 28, "Z28")
Z29 = SIMD.define(S512, 29, "Z29")
Z30 = SIMD.define(S512, 30, "Z30")
Z31 = SIMD.define(S512, 31, "Z31")
Z0 = vec(S512, 0, "Z0")
Z1 = vec(S512, 1, "Z1")
Z2 = vec(S512, 2, "Z2")
Z3 = vec(S512, 3, "Z3")
Z4 = vec(S512, 4, "Z4")
Z5 = vec(S512, 5, "Z5")
Z6 = vec(S512, 6, "Z6")
Z7 = vec(S512, 7, "Z7")
Z8 = vec(S512, 8, "Z8")
Z9 = vec(S512, 9, "Z9")
Z10 = vec(S512, 10, "Z10")
Z11 = vec(S512, 11, "Z11")
Z12 = vec(S512, 12, "Z12")
Z13 = vec(S512, 13, "Z13")
Z14 = vec(S512, 14, "Z14")
Z15 = vec(S512, 15, "Z15")
Z16 = vec(S512, 16, "Z16")
Z17 = vec(S512, 17, "Z17")
Z18 = vec(S512, 18, "Z18")
Z19 = vec(S512, 19, "Z19")
Z20 = vec(S512, 20, "Z20")
Z21 = vec(S512, 21, "Z21")
Z22 = vec(S512, 22, "Z22")
Z23 = vec(S512, 23, "Z23")
Z24 = vec(S512, 24, "Z24")
Z25 = vec(S512, 25, "Z25")
Z26 = vec(S512, 26, "Z26")
Z27 = vec(S512, 27, "Z27")
Z28 = vec(S512, 28, "Z28")
Z29 = vec(S512, 29, "Z29")
Z30 = vec(S512, 30, "Z30")
Z31 = vec(S512, 31, "Z31")
)

28
reg/x86_test.go Normal file
View File

@@ -0,0 +1,28 @@
package reg
import "testing"
func TestAsMethods(t *testing.T) {
cases := [][2]Register{
{RAX.As8(), AL},
{ECX.As8L(), CL},
{EBX.As8H(), BH},
{R9B.As16(), R9W},
{DH.As32(), EDX},
{R14L.As64(), R14},
{X2.AsX(), X2},
{X4.AsY(), Y4},
{X9.AsZ(), Z9},
{Y2.AsX(), X2},
{Y4.AsY(), Y4},
{Y9.AsZ(), Z9},
{Z2.AsX(), X2},
{Z4.AsY(), Y4},
{Z9.AsZ(), Z9},
}
for _, c := range cases {
if c[0] != c[1] {
t.FailNow()
}
}
}

View File

@@ -1,8 +1,8 @@
#!/bin/bash -ex
# Separate core packages from those that depend on the whole library being built.
core=$(go list ./... | grep -v examples)
post=$(go list ./... | grep examples)
core=$(go list ./... | grep -Ev 'avo/(examples|tests)')
post=$(go list ./... | grep -E 'avo/(examples|tests)')
# Install avogen (for bootstrapping).
go install ./internal/cmd/avogen

24
tests/cast/asm.go Normal file
View File

@@ -0,0 +1,24 @@
// +build ignore
package main
import (
. "github.com/mmcloughlin/avo/build"
)
func main() {
TEXT("Split", "func(x uint64) (q uint64, l uint32, w uint16, b uint8)")
Doc(
"Split returns the low 64, 32, 16 and 8 bits of x.",
"Tests the As() methods of virtual general-purpose registers.",
)
x := GP64v()
Load(Param("x"), x)
Store(x, Return("q"))
Store(x.As32(), Return("l"))
Store(x.As16(), Return("w"))
Store(x.As8(), Return("b"))
RET()
Generate()
}

12
tests/cast/cast.s Normal file
View File

@@ -0,0 +1,12 @@
// Code generated by command: go run asm.go -out cast.s -stubs stub.go. DO NOT EDIT.
#include "textflag.h"
// func Split(x uint64) (q uint64, l uint32, w uint16, b uint8)
TEXT ·Split(SB), 0, $0-23
MOVQ x(FP), AX
MOVQ AX, q+8(FP)
MOVL AX, l+16(FP)
MOVW AX, w+20(FP)
MOVB AL, b+22(FP)
RET

17
tests/cast/cast_test.go Normal file
View File

@@ -0,0 +1,17 @@
package cast
import (
"testing"
"testing/quick"
)
//go:generate go run asm.go -out cast.s -stubs stub.go
func TestSplit(t *testing.T) {
expect := func(x uint64) (uint64, uint32, uint16, uint8) {
return x, uint32(x), uint16(x), uint8(x)
}
if err := quick.CheckEqual(Split, expect, nil); err != nil {
t.Fatal(err)
}
}

7
tests/cast/stub.go Normal file
View File

@@ -0,0 +1,7 @@
// Code generated by command: go run asm.go -out cast.s -stubs stub.go. DO NOT EDIT.
package cast
// Split returns the low 64, 32, 16 and 8 bits of x.
// Tests the As() methods of virtual general-purpose registers.
func Split(x uint64) (q uint64, l uint32, w uint16, b uint8)